aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--Documentation/Changes10
-rw-r--r--Documentation/SubmittingPatches86
-rw-r--r--Documentation/device-mapper/snapshot.txt73
-rw-r--r--Documentation/keys.txt74
-rw-r--r--Documentation/networking/ip-sysctl.txt10
-rw-r--r--Documentation/sparse.txt4
-rw-r--r--MAINTAINERS45
-rw-r--r--Makefile2
-rw-r--r--README8
-rw-r--r--arch/alpha/kernel/entry.S1
-rw-r--r--arch/alpha/kernel/process.c4
-rw-r--r--arch/alpha/kernel/traps.c15
-rw-r--r--arch/arm/Makefile4
-rw-r--r--arch/arm/common/gic.c2
-rw-r--r--arch/arm/common/locomo.c1
-rw-r--r--arch/arm/configs/ixp4xx_defconfig577
-rw-r--r--arch/arm/kernel/entry-armv.S2
-rw-r--r--arch/arm/kernel/io.c6
-rw-r--r--arch/arm/mach-clps711x/fortunet.c2
-rw-r--r--arch/arm/mach-imx/leds-mx1ads.c1
-rw-r--r--arch/arm/mach-iop3xx/common.c1
-rw-r--r--arch/arm/mach-iop3xx/iop321-time.c1
-rw-r--r--arch/arm/mach-iop3xx/iop331-time.c1
-rw-r--r--arch/arm/mach-iop3xx/iq31244-mm.c1
-rw-r--r--arch/arm/mach-iop3xx/iq80321-mm.c1
-rw-r--r--arch/arm/mach-iop3xx/iq80331-mm.c1
-rw-r--r--arch/arm/mach-iop3xx/iq80332-mm.c1
-rw-r--r--arch/arm/mach-ixp2000/core.c3
-rw-r--r--arch/arm/mach-ixp2000/pci.c1
-rw-r--r--arch/arm/mach-ixp4xx/common.c5
-rw-r--r--arch/arm/mach-ixp4xx/ixdp425-setup.c2
-rw-r--r--arch/arm/mach-l7200/core.c1
-rw-r--r--arch/arm/mach-pxa/corgi_lcd.c1
-rw-r--r--arch/arm/mach-pxa/generic.c1
-rw-r--r--arch/arm/mach-s3c2410/usb-simtec.c1
-rw-r--r--arch/arm/mach-versatile/core.c44
-rw-r--r--arch/arm/mach-versatile/pci.c1
-rw-r--r--arch/arm/mm/abort-ev6.S5
-rw-r--r--arch/arm/mm/cache-v6.S9
-rw-r--r--arch/arm/mm/flush.c36
-rw-r--r--arch/arm/plat-omap/common.c1
-rw-r--r--arch/arm/plat-omap/cpu-omap.c1
-rw-r--r--arch/arm/plat-omap/usb.c1
-rw-r--r--arch/i386/kernel/acpi/boot.c3
-rw-r--r--arch/i386/kernel/apic.c1
-rw-r--r--arch/i386/kernel/cpu/amd.c16
-rw-r--r--arch/i386/kernel/cpu/mcheck/k7.c1
-rw-r--r--arch/i386/kernel/cpu/mcheck/non-fatal.c1
-rw-r--r--arch/i386/kernel/cpu/mcheck/p4.c1
-rw-r--r--arch/i386/kernel/cpu/mcheck/p5.c1
-rw-r--r--arch/i386/kernel/cpu/mcheck/p6.c1
-rw-r--r--arch/i386/kernel/cpu/mcheck/winchip.c1
-rw-r--r--arch/i386/kernel/crash.c2
-rw-r--r--arch/i386/kernel/i8259.c3
-rw-r--r--arch/i386/kernel/io_apic.c1
-rw-r--r--arch/i386/kernel/mpparse.c1
-rw-r--r--arch/i386/kernel/nmi.c1
-rw-r--r--arch/i386/kernel/process.c2
-rw-r--r--arch/i386/kernel/smp.c1
-rw-r--r--arch/i386/kernel/smpboot.c1
-rw-r--r--arch/i386/kernel/timers/timer_pit.c1
-rw-r--r--arch/i386/kernel/traps.c1
-rw-r--r--arch/i386/mach-default/setup.c1
-rw-r--r--arch/i386/mach-visws/setup.c1
-rw-r--r--arch/i386/mach-visws/visws_apic.c1
-rw-r--r--arch/i386/mach-voyager/setup.c1
-rw-r--r--arch/i386/mach-voyager/voyager_basic.c1
-rw-r--r--arch/i386/mach-voyager/voyager_smp.c2
-rw-r--r--arch/i386/mach-voyager/voyager_thread.c2
-rw-r--r--arch/i386/oprofile/nmi_timer_int.c2
-rw-r--r--arch/i386/pci/acpi.c1
-rw-r--r--arch/i386/pci/irq.c3
-rw-r--r--arch/i386/power/cpu.c17
-rw-r--r--arch/ia64/hp/sim/simscsi.c29
-rw-r--r--arch/ia64/kernel/mca_asm.S96
-rw-r--r--arch/ia64/kernel/mca_drv.c21
-rw-r--r--arch/m32r/Makefile2
-rw-r--r--arch/m32r/lib/usercopy.c16
-rw-r--r--arch/ppc/boot/ld.script2
-rw-r--r--arch/ppc/kernel/Makefile3
-rw-r--r--arch/ppc/kernel/perfmon.c7
-rw-r--r--arch/ppc/platforms/4xx/bamboo.c1
-rw-r--r--arch/ppc/platforms/4xx/ebony.c1
-rw-r--r--arch/ppc/platforms/4xx/luan.c1
-rw-r--r--arch/ppc/platforms/4xx/ocotea.c1
-rw-r--r--arch/ppc/platforms/83xx/mpc834x_sys.c1
-rw-r--r--arch/ppc/platforms/85xx/mpc8540_ads.c1
-rw-r--r--arch/ppc/platforms/85xx/mpc8560_ads.c1
-rw-r--r--arch/ppc/platforms/85xx/mpc85xx_ads_common.c1
-rw-r--r--arch/ppc/platforms/85xx/mpc85xx_cds_common.c1
-rw-r--r--arch/ppc/platforms/85xx/sbc8560.c1
-rw-r--r--arch/ppc/platforms/85xx/sbc85xx.c1
-rw-r--r--arch/ppc/platforms/85xx/stx_gp3.c1
-rw-r--r--arch/ppc/platforms/chestnut.c1
-rw-r--r--arch/ppc/platforms/chrp_setup.c1
-rw-r--r--arch/ppc/platforms/gemini_setup.c1
-rw-r--r--arch/ppc/platforms/mvme5100.c1
-rw-r--r--arch/ppc/platforms/pmac_cpufreq.c7
-rw-r--r--arch/ppc/platforms/pmac_feature.c4
-rw-r--r--arch/ppc/platforms/pmac_setup.c11
-rw-r--r--arch/ppc/platforms/powerpmc250.c1
-rw-r--r--arch/ppc/platforms/pplus.c1
-rw-r--r--arch/ppc/platforms/prpmc750.c1
-rw-r--r--arch/ppc/platforms/prpmc800.c1
-rw-r--r--arch/ppc/platforms/radstone_ppc7d.c2
-rw-r--r--arch/ppc/platforms/sandpoint.c1
-rw-r--r--arch/ppc/syslib/mv64x60.c4
-rw-r--r--arch/ppc/syslib/of_device.c6
-rw-r--r--arch/ppc/syslib/open_pic.c1
-rw-r--r--arch/ppc/syslib/open_pic2.c1
-rw-r--r--arch/ppc/syslib/ppc4xx_setup.c1
-rw-r--r--arch/ppc64/kernel/bpa_iommu.c6
-rw-r--r--arch/ppc64/kernel/kprobes.c8
-rw-r--r--arch/ppc64/kernel/machine_kexec.c1
-rw-r--r--arch/ppc64/kernel/of_device.c7
-rw-r--r--arch/ppc64/kernel/pSeries_iommu.c16
-rw-r--r--arch/ppc64/kernel/pmac_setup.c18
-rw-r--r--arch/ppc64/kernel/pmac_time.c4
-rw-r--r--arch/ppc64/kernel/ptrace.c1
-rw-r--r--arch/ppc64/mm/hash_native.c6
-rw-r--r--arch/ppc64/mm/hugetlbpage.c7
-rw-r--r--arch/ppc64/mm/tlb.c4
-rw-r--r--arch/s390/kernel/compat_signal.c6
-rw-r--r--arch/s390/kernel/signal.c4
-rw-r--r--arch/sparc/kernel/setup.c2
-rw-r--r--arch/sparc64/Kconfig.debug8
-rw-r--r--arch/sparc64/kernel/cpu.c4
-rw-r--r--arch/sparc64/kernel/devices.c22
-rw-r--r--arch/sparc64/kernel/dtlb_backend.S13
-rw-r--r--arch/sparc64/kernel/dtlb_base.S8
-rw-r--r--arch/sparc64/kernel/entry.S180
-rw-r--r--arch/sparc64/kernel/head.S563
-rw-r--r--arch/sparc64/kernel/ktlb.S198
-rw-r--r--arch/sparc64/kernel/pci_schizo.c2
-rw-r--r--arch/sparc64/kernel/ptrace.c14
-rw-r--r--arch/sparc64/kernel/setup.c50
-rw-r--r--arch/sparc64/kernel/smp.c21
-rw-r--r--arch/sparc64/kernel/sys32.S170
-rw-r--r--arch/sparc64/kernel/trampoline.S16
-rw-r--r--arch/sparc64/kernel/traps.c100
-rw-r--r--arch/sparc64/kernel/una_asm.S65
-rw-r--r--arch/sparc64/kernel/unaligned.c53
-rw-r--r--arch/sparc64/kernel/us3_cpufreq.c5
-rw-r--r--arch/sparc64/kernel/vmlinux.lds.S3
-rw-r--r--arch/sparc64/lib/strncpy_from_user.S16
-rw-r--r--arch/sparc64/lib/user_fixup.c63
-rw-r--r--arch/sparc64/mm/Makefile2
-rw-r--r--arch/sparc64/mm/extable.c80
-rw-r--r--arch/sparc64/mm/fault.c69
-rw-r--r--arch/sparc64/mm/init.c905
-rw-r--r--arch/sparc64/mm/ultra.S96
-rw-r--r--arch/sparc64/prom/Makefile4
-rw-r--r--arch/sparc64/prom/console.c2
-rw-r--r--arch/sparc64/prom/devops.c2
-rw-r--r--arch/sparc64/prom/init.c5
-rw-r--r--arch/sparc64/prom/map.S72
-rw-r--r--arch/sparc64/prom/memory.c152
-rw-r--r--arch/sparc64/prom/misc.c34
-rw-r--r--arch/sparc64/prom/p1275.c2
-rw-r--r--arch/sparc64/prom/printf.c2
-rw-r--r--arch/sparc64/prom/tree.c50
-rw-r--r--arch/um/Makefile68
-rw-r--r--arch/um/Makefile-i38622
-rw-r--r--arch/um/Makefile-skas2
-rw-r--r--arch/um/Makefile-x86_6421
-rw-r--r--arch/um/drivers/chan_kern.c60
-rw-r--r--arch/um/drivers/port_kern.c1
-rw-r--r--arch/um/include/common-offsets.h4
-rw-r--r--arch/um/include/os.h4
-rw-r--r--arch/um/include/skas_ptregs.h6
-rw-r--r--arch/um/include/sysdep-i386/sc.h44
-rw-r--r--arch/um/include/sysdep-i386/sigcontext.h10
-rw-r--r--arch/um/include/sysdep-i386/thread.h11
-rw-r--r--arch/um/include/sysdep-x86_64/sc.h45
-rw-r--r--arch/um/include/sysdep-x86_64/sigcontext.h5
-rw-r--r--arch/um/include/sysdep-x86_64/thread.h10
-rw-r--r--arch/um/include/task.h9
-rw-r--r--arch/um/include/user.h4
-rw-r--r--arch/um/kernel/irq.c1
-rw-r--r--arch/um/kernel/process_kern.c3
-rw-r--r--arch/um/kernel/sigio_user.c2
-rw-r--r--arch/um/kernel/skas/Makefile2
-rw-r--r--arch/um/kernel/skas/util/Makefile5
-rw-r--r--arch/um/kernel/skas/util/mk_ptregs-i386.c49
-rw-r--r--arch/um/kernel/skas/util/mk_ptregs-x86_64.c66
-rw-r--r--arch/um/kernel/tlb.c12
-rw-r--r--arch/um/kernel/trap_kern.c21
-rw-r--r--arch/um/kernel/tt/uaccess_user.c11
-rw-r--r--arch/um/kernel/um_arch.c2
-rw-r--r--arch/um/kernel/umid.c41
-rw-r--r--arch/um/os-Linux/elf_aux.c2
-rw-r--r--arch/um/os-Linux/util/Makefile4
-rw-r--r--arch/um/os-Linux/util/mk_user_constants.c23
-rw-r--r--arch/um/sys-i386/Makefile2
-rw-r--r--arch/um/sys-i386/kernel-offsets.c4
-rw-r--r--arch/um/sys-i386/user-offsets.c73
-rw-r--r--arch/um/sys-i386/util/Makefile5
-rw-r--r--arch/um/sys-i386/util/mk_sc.c51
-rw-r--r--arch/um/sys-i386/util/mk_thread.c22
-rw-r--r--arch/um/sys-x86_64/Makefile2
-rw-r--r--arch/um/sys-x86_64/kernel-offsets.c2
-rw-r--r--arch/um/sys-x86_64/user-offsets.c117
-rw-r--r--arch/um/sys-x86_64/util/Makefile8
-rw-r--r--arch/um/sys-x86_64/util/mk_sc.c47
-rw-r--r--arch/um/sys-x86_64/util/mk_thread.c20
-rw-r--r--arch/um/util/Makefile5
-rw-r--r--arch/um/util/mk_constants.c32
-rw-r--r--arch/um/util/mk_task.c30
-rw-r--r--arch/x86_64/kernel/kprobes.c8
-rw-r--r--arch/x86_64/kernel/mce.c10
-rw-r--r--arch/x86_64/kernel/setup.c27
-rw-r--r--arch/x86_64/kernel/time.c3
-rw-r--r--arch/x86_64/mm/numa.c10
-rw-r--r--arch/xtensa/kernel/pci.c4
-rw-r--r--arch/xtensa/kernel/platform.c2
-rw-r--r--arch/xtensa/kernel/process.c2
-rw-r--r--arch/xtensa/kernel/setup.c2
-rw-r--r--arch/xtensa/kernel/signal.c2
-rw-r--r--arch/xtensa/kernel/time.c2
-rw-r--r--arch/xtensa/mm/init.c2
-rw-r--r--drivers/char/drm/drm_drv.c2
-rw-r--r--drivers/char/drm/drm_proc.c2
-rw-r--r--drivers/char/hpet.c1
-rw-r--r--drivers/char/ipmi/ipmi_msghandler.c6
-rw-r--r--drivers/char/ipmi/ipmi_poweroff.c2
-rw-r--r--drivers/char/n_r3964.c84
-rw-r--r--drivers/char/watchdog/mv64x60_wdt.c14
-rw-r--r--drivers/connector/cn_queue.c32
-rw-r--r--drivers/connector/connector.c74
-rw-r--r--drivers/hwmon/Kconfig9
-rw-r--r--drivers/hwmon/hdaps.c21
-rw-r--r--drivers/i2c/busses/Kconfig12
-rw-r--r--drivers/i2c/busses/Makefile1
-rw-r--r--drivers/i2c/busses/i2c-pmac-smu.c316
-rw-r--r--drivers/ide/legacy/ide-cs.c2
-rw-r--r--drivers/ieee1394/amdtp.c1
-rw-r--r--drivers/ieee1394/csr1212.h1
-rw-r--r--drivers/ieee1394/dv1394.c1
-rw-r--r--drivers/ieee1394/eth1394.c12
-rw-r--r--drivers/ieee1394/eth1394.h6
-rw-r--r--drivers/ieee1394/hosts.c3
-rw-r--r--drivers/ieee1394/hosts.h8
-rw-r--r--drivers/ieee1394/ieee1394_core.c32
-rw-r--r--drivers/ieee1394/nodemgr.c23
-rw-r--r--drivers/ieee1394/ohci1394.c4
-rw-r--r--drivers/ieee1394/raw1394.c1
-rw-r--r--drivers/ieee1394/sbp2.c107
-rw-r--r--drivers/ieee1394/video1394.c1
-rw-r--r--drivers/infiniband/core/uverbs.h1
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c120
-rw-r--r--drivers/infiniband/core/uverbs_main.c27
-rw-r--r--drivers/infiniband/hw/mthca/mthca_cmd.c4
-rw-r--r--drivers/infiniband/hw/mthca/mthca_eq.c2
-rw-r--r--drivers/infiniband/hw/mthca/mthca_memfree.c19
-rw-r--r--drivers/infiniband/hw/mthca/mthca_provider.c2
-rw-r--r--drivers/input/input.c1
-rw-r--r--drivers/isdn/divert/divert_procfs.c6
-rw-r--r--drivers/isdn/hardware/eicon/diva_didd.c6
-rw-r--r--drivers/isdn/hardware/eicon/divasproc.c2
-rw-r--r--drivers/isdn/hisax/st5481_b.c2
-rw-r--r--drivers/isdn/hisax/st5481_usb.c2
-rw-r--r--drivers/isdn/hysdn/hysdn_procconf.c2
-rw-r--r--drivers/macintosh/smu.c1032
-rw-r--r--drivers/macintosh/therm_adt746x.c2
-rw-r--r--drivers/macintosh/therm_pm72.c2
-rw-r--r--drivers/macintosh/therm_windtunnel.c2
-rw-r--r--drivers/md/dm-ioctl.c9
-rw-r--r--drivers/md/dm-mpath.c16
-rw-r--r--drivers/media/dvb/frontends/tda10021.c4
-rw-r--r--drivers/media/video/bttv-cards.c2
-rw-r--r--drivers/media/video/bttv-driver.c14
-rw-r--r--drivers/media/video/bttvp.h2
-rw-r--r--drivers/media/video/cpia.c2
-rw-r--r--drivers/media/video/rds.h2
-rw-r--r--drivers/media/video/saa6588.c4
-rw-r--r--drivers/mfd/ucb1x00-ts.c4
-rw-r--r--drivers/mtd/devices/docecc.c8
-rw-r--r--drivers/mtd/maps/bast-flash.c1
-rw-r--r--drivers/mtd/maps/ixp2000.c1
-rw-r--r--drivers/mtd/maps/ixp4xx.c3
-rw-r--r--drivers/mtd/maps/omap_nor.c1
-rw-r--r--drivers/mtd/maps/sa1100-flash.c1
-rw-r--r--drivers/mtd/nand/s3c2410.c1
-rw-r--r--drivers/net/8390.c2
-rw-r--r--drivers/net/Kconfig8
-rw-r--r--drivers/net/Makefile1
-rw-r--r--drivers/net/arm/am79c961a.c22
-rw-r--r--drivers/net/bmac.c2
-rw-r--r--drivers/net/bonding/bond_main.c2
-rw-r--r--drivers/net/cassini.c5237
-rw-r--r--drivers/net/cassini.h4425
-rw-r--r--drivers/net/cs89x0.c1
-rw-r--r--drivers/net/ibmveth.c6
-rw-r--r--drivers/net/irda/vlsi_ir.c4
-rw-r--r--drivers/net/ns83820.c2
-rw-r--r--drivers/net/pcmcia/smc91c92_cs.c2
-rw-r--r--drivers/net/pppoe.c4
-rw-r--r--drivers/net/r8169.c3
-rw-r--r--drivers/net/sk98lin/skge.c8
-rw-r--r--drivers/net/skge.c46
-rw-r--r--drivers/net/tg3.c117
-rw-r--r--drivers/net/tg3.h1
-rw-r--r--drivers/net/wan/sdlamain.c23
-rw-r--r--drivers/net/wan/syncppp.c2
-rw-r--r--drivers/net/wireless/strip.c4
-rw-r--r--drivers/parisc/led.c5
-rw-r--r--drivers/pci/probe.c18
-rw-r--r--drivers/pcmcia/Kconfig2
-rw-r--r--drivers/pcmcia/cardbus.c5
-rw-r--r--drivers/pcmcia/omap_cf.c1
-rw-r--r--drivers/pcmcia/rsrc_nonstatic.c10
-rw-r--r--drivers/pcmcia/ti113x.h115
-rw-r--r--drivers/pcmcia/yenta_socket.c62
-rw-r--r--drivers/s390/net/qeth.h2
-rw-r--r--drivers/s390/net/qeth_main.c41
-rw-r--r--drivers/scsi/3w-9xxx.c55
-rw-r--r--drivers/scsi/3w-9xxx.h17
-rw-r--r--drivers/scsi/Makefile1
-rw-r--r--drivers/scsi/aacraid/aachba.c283
-rw-r--r--drivers/scsi/aacraid/aacraid.h17
-rw-r--r--drivers/scsi/aacraid/comminit.c17
-rw-r--r--drivers/scsi/aacraid/commsup.c581
-rw-r--r--drivers/scsi/aacraid/linit.c12
-rw-r--r--drivers/scsi/aic7xxx/aic7770_osm.c3
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_osm.c8
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_osm_pci.c3
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_osm.c8
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_osm_pci.c3
-rw-r--r--drivers/scsi/ata_piix.c1
-rw-r--r--drivers/scsi/hosts.c2
-rw-r--r--drivers/scsi/libata-core.c81
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c8
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c4
-rw-r--r--drivers/scsi/lpfc/lpfc_hw.h4
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c6
-rw-r--r--drivers/scsi/megaraid.c70
-rw-r--r--drivers/scsi/megaraid/Kconfig.megaraid9
-rw-r--r--drivers/scsi/megaraid/Makefile1
-rw-r--r--drivers/scsi/megaraid/megaraid_sas.c2805
-rw-r--r--drivers/scsi/megaraid/megaraid_sas.h1142
-rw-r--r--drivers/scsi/mesh.c29
-rw-r--r--drivers/scsi/qla2xxx/qla_rscn.c2
-rw-r--r--drivers/scsi/sata_nv.c2
-rw-r--r--drivers/scsi/scsi_scan.c96
-rw-r--r--drivers/scsi/scsi_transport_sas.c9
-rw-r--r--drivers/scsi/sg.c3
-rw-r--r--drivers/serial/clps711x.c2
-rw-r--r--drivers/serial/imx.c2
-rw-r--r--drivers/serial/ioc4_serial.c12
-rw-r--r--drivers/serial/s3c2410.c6
-rw-r--r--drivers/serial/serial_cs.c1
-rw-r--r--drivers/usb/core/hcd-pci.c9
-rw-r--r--drivers/usb/host/ohci-lh7a404.c2
-rw-r--r--drivers/usb/host/ohci-omap.c1
-rw-r--r--drivers/usb/host/ohci-s3c2410.c1
-rw-r--r--drivers/usb/media/vicam.c4
-rw-r--r--drivers/video/Kconfig1
-rw-r--r--drivers/video/aty/radeon_base.c2
-rw-r--r--drivers/video/aty/radeon_pm.c14
-rw-r--r--drivers/video/aty/radeonfb.h2
-rw-r--r--drivers/video/aty/xlinit.c8
-rw-r--r--drivers/video/backlight/corgi_bl.c1
-rw-r--r--drivers/video/cyblafb.c11
-rw-r--r--drivers/video/i810/i810-i2c.c16
-rw-r--r--drivers/video/imxfb.c1
-rw-r--r--drivers/video/intelfb/intelfbdrv.c21
-rw-r--r--drivers/video/pxafb.c108
-rw-r--r--drivers/video/pxafb.h9
-rw-r--r--drivers/video/s3c2410fb.c4
-rw-r--r--fs/9p/conv.c157
-rw-r--r--fs/9p/fid.c176
-rw-r--r--fs/9p/fid.h7
-rw-r--r--fs/9p/v9fs.c8
-rw-r--r--fs/9p/vfs_dentry.c2
-rw-r--r--fs/9p/vfs_dir.c11
-rw-r--r--fs/9p/vfs_file.c88
-rw-r--r--fs/9p/vfs_inode.c95
-rw-r--r--fs/9p/vfs_super.c39
-rw-r--r--fs/Kconfig3
-rw-r--r--fs/aio.c98
-rw-r--r--fs/cifs/cifsfs.c2
-rw-r--r--fs/cifs/connect.c2
-rw-r--r--fs/eventpoll.c8
-rw-r--r--fs/ext2/ialloc.c25
-rw-r--r--fs/ext3/balloc.c6
-rw-r--r--fs/ext3/ialloc.c29
-rw-r--r--fs/ext3/resize.c6
-rw-r--r--fs/ext3/super.c11
-rw-r--r--fs/fuse/dir.c6
-rw-r--r--fs/fuse/file.c4
-rw-r--r--fs/hostfs/hostfs_kern.c7
-rw-r--r--fs/jfs/inode.c3
-rw-r--r--fs/jfs/jfs_dmap.c2
-rw-r--r--fs/jfs/jfs_txnmgr.c15
-rw-r--r--fs/jfs/jfs_txnmgr.h1
-rw-r--r--fs/nfs/read.c5
-rw-r--r--fs/ntfs/ChangeLog10
-rw-r--r--fs/ntfs/layout.h8
-rw-r--r--fs/ntfs/lcnalloc.c31
-rw-r--r--fs/ntfs/lcnalloc.h27
-rw-r--r--fs/ntfs/logfile.c30
-rw-r--r--fs/ntfs/logfile.h2
-rw-r--r--fs/ntfs/mft.c2
-rw-r--r--fs/proc/base.c8
-rw-r--r--fs/read_write.c3
-rw-r--r--include/asm-alpha/compiler.h5
-rw-r--r--include/asm-arm/arch-rpc/hardware.h4
-rw-r--r--include/asm-arm/arch-s3c2410/anubis-map.h10
-rw-r--r--include/asm-arm/arch-versatile/io.h6
-rw-r--r--include/asm-arm/elf.h2
-rw-r--r--include/asm-arm/io.h6
-rw-r--r--include/asm-arm/signal.h1
-rw-r--r--include/asm-generic/pgtable.h13
-rw-r--r--include/asm-i386/hw_irq.h2
-rw-r--r--include/asm-ia64/mca.h5
-rw-r--r--include/asm-ia64/uaccess.h12
-rw-r--r--include/asm-m32r/io.h2
-rw-r--r--include/asm-m32r/uaccess.h10
-rw-r--r--include/asm-mips/pgtable.h2
-rw-r--r--include/asm-ppc/io.h20
-rw-r--r--include/asm-ppc/macio.h1
-rw-r--r--include/asm-ppc/mv64x60.h4
-rw-r--r--include/asm-ppc/of_device.h5
-rw-r--r--include/asm-ppc64/smu.h365
-rw-r--r--include/asm-ppc64/tlbflush.h1
-rw-r--r--include/asm-ppc64/uaccess.h6
-rw-r--r--include/asm-s390/sigcontext.h2
-rw-r--r--include/asm-s390/signal.h2
-rw-r--r--include/asm-sparc/pgtable.h2
-rw-r--r--include/asm-sparc64/cacheflush.h5
-rw-r--r--include/asm-sparc64/cpudata.h10
-rw-r--r--include/asm-sparc64/head.h9
-rw-r--r--include/asm-sparc64/openprom.h4
-rw-r--r--include/asm-sparc64/oplib.h66
-rw-r--r--include/asm-sparc64/page.h17
-rw-r--r--include/asm-sparc64/pgtable.h14
-rw-r--r--include/asm-sparc64/uaccess.h24
-rw-r--r--include/asm-um/pgtable.h1
-rw-r--r--include/asm-um/uaccess.h2
-rw-r--r--include/asm-x86_64/msr.h1
-rw-r--r--include/asm-x86_64/pgtable.h2
-rw-r--r--include/asm-xtensa/atomic.h2
-rw-r--r--include/asm-xtensa/bitops.h2
-rw-r--r--include/asm-xtensa/hardirq.h1
-rw-r--r--include/asm-xtensa/semaphore.h49
-rw-r--r--include/asm-xtensa/system.h16
-rw-r--r--include/linux/aio.h34
-rw-r--r--include/linux/connector.h21
-rw-r--r--include/linux/if_ether.h2
-rw-r--r--include/linux/inetdevice.h12
-rw-r--r--include/linux/ipv6.h5
-rw-r--r--include/linux/key-ui.h28
-rw-r--r--include/linux/key.h78
-rw-r--r--include/linux/libata.h1
-rw-r--r--include/linux/mod_devicetable.h7
-rw-r--r--include/linux/netdevice.h89
-rw-r--r--include/linux/netfilter_ipv4/ip_conntrack.h25
-rw-r--r--include/linux/netfilter_ipv4/ip_conntrack_pptp.h125
-rw-r--r--include/linux/netfilter_ipv4/ip_conntrack_tuple.h6
-rw-r--r--include/linux/netfilter_ipv4/ip_nat_core.h12
-rw-r--r--include/linux/pci_ids.h19
-rw-r--r--include/linux/reboot.h4
-rw-r--r--include/linux/sched.h12
-rw-r--r--include/linux/skbuff.h12
-rw-r--r--include/linux/sysctl.h26
-rw-r--r--include/linux/tc_ematch/tc_em_meta.h2
-rw-r--r--include/net/inet6_hashtables.h21
-rw-r--r--include/net/inet_hashtables.h64
-rw-r--r--include/net/inet_timewait_sock.h2
-rw-r--r--include/net/llc.h30
-rw-r--r--include/net/llc_conn.h15
-rw-r--r--include/net/llc_sap.h8
-rw-r--r--include/net/sock.h5
-rw-r--r--include/pcmcia/ss.h9
-rw-r--r--include/rdma/ib_verbs.h1
-rw-r--r--include/scsi/scsi_device.h1
-rw-r--r--ipc/mqueue.c1
-rw-r--r--kernel/cpuset.c11
-rw-r--r--kernel/exit.c2
-rw-r--r--kernel/params.c10
-rw-r--r--kernel/power/Kconfig2
-rw-r--r--kernel/power/disk.c6
-rw-r--r--kernel/power/power.h7
-rw-r--r--kernel/power/swsusp.c29
-rw-r--r--kernel/signal.c34
-rw-r--r--kernel/sys.c52
-rw-r--r--mm/bootmem.c14
-rw-r--r--mm/mremap.c6
-rw-r--r--mm/slab.c70
-rw-r--r--mm/swapfile.c1
-rw-r--r--net/802/p8022.c2
-rw-r--r--net/802/psnap.c2
-rw-r--r--net/802/tr.c2
-rw-r--r--net/appletalk/ddp.c31
-rw-r--r--net/atm/addr.c6
-rw-r--r--net/atm/clip.c2
-rw-r--r--net/atm/common.c6
-rw-r--r--net/atm/ioctl.c34
-rw-r--r--net/atm/lec.c43
-rw-r--r--net/atm/signaling.c8
-rw-r--r--net/atm/svc.c1
-rw-r--r--net/bridge/br_forward.c3
-rw-r--r--net/core/datagram.c81
-rw-r--r--net/core/dev.c4
-rw-r--r--net/core/neighbour.c25
-rw-r--r--net/core/netpoll.c2
-rw-r--r--net/core/pktgen.c26
-rw-r--r--net/core/skbuff.c5
-rw-r--r--net/core/sock.c20
-rw-r--r--net/dccp/ipv4.c12
-rw-r--r--net/econet/af_econet.c2
-rw-r--r--net/ethernet/eth.c31
-rw-r--r--net/ieee80211/ieee80211_module.c2
-rw-r--r--net/ieee80211/ieee80211_tx.c2
-rw-r--r--net/ipv4/arp.c21
-rw-r--r--net/ipv4/devinet.c22
-rw-r--r--net/ipv4/fib_frontend.c4
-rw-r--r--net/ipv4/fib_semantics.c4
-rw-r--r--net/ipv4/icmp.c2
-rw-r--r--net/ipv4/igmp.c2
-rw-r--r--net/ipv4/inet_timewait_sock.c6
-rw-r--r--net/ipv4/ip_gre.c4
-rw-r--r--net/ipv4/ipmr.c6
-rw-r--r--net/ipv4/netfilter/Kconfig11
-rw-r--r--net/ipv4/netfilter/Makefile7
-rw-r--r--net/ipv4/netfilter/ip_conntrack_amanda.c2
-rw-r--r--net/ipv4/netfilter/ip_conntrack_core.c49
-rw-r--r--net/ipv4/netfilter/ip_conntrack_helper_pptp.c17
-rw-r--r--net/ipv4/netfilter/ip_conntrack_netbios_ns.c4
-rw-r--r--net/ipv4/netfilter/ip_conntrack_proto_gre.c1
-rw-r--r--net/ipv4/netfilter/ip_conntrack_proto_sctp.c1
-rw-r--r--net/ipv4/netfilter/ip_conntrack_proto_tcp.c3
-rw-r--r--net/ipv4/netfilter/ip_conntrack_standalone.c2
-rw-r--r--net/ipv4/netfilter/ip_nat_core.c35
-rw-r--r--net/ipv4/netfilter/ip_nat_helper.c4
-rw-r--r--net/ipv4/netfilter/ip_nat_standalone.c25
-rw-r--r--net/ipv4/netfilter/ip_queue.c4
-rw-r--r--net/ipv4/netfilter/ipt_REDIRECT.c2
-rw-r--r--net/ipv4/netfilter/ipt_ULOG.c4
-rw-r--r--net/ipv4/route.c6
-rw-r--r--net/ipv4/tcp_input.c2
-rw-r--r--net/ipv4/tcp_ipv4.c11
-rw-r--r--net/ipv4/tcp_output.c11
-rw-r--r--net/ipv6/addrconf.c4
-rw-r--r--net/ipv6/ip6_output.c2
-rw-r--r--net/ipv6/netfilter/Kconfig11
-rw-r--r--net/ipv6/netfilter/Makefile2
-rw-r--r--net/ipv6/netfilter/ip6_queue.c4
-rw-r--r--net/ipv6/tcp_ipv6.c18
-rw-r--r--net/ipv6/udp.c14
-rw-r--r--net/irda/irlan/irlan_eth.c2
-rw-r--r--net/irda/irttp.c16
-rw-r--r--net/llc/Makefile1
-rw-r--r--net/llc/af_llc.c501
-rw-r--r--net/llc/llc_c_ac.c271
-rw-r--r--net/llc/llc_c_ev.c157
-rw-r--r--net/llc/llc_conn.c212
-rw-r--r--net/llc/llc_core.c34
-rw-r--r--net/llc/llc_if.c11
-rw-r--r--net/llc/llc_input.c19
-rw-r--r--net/llc/llc_output.c2
-rw-r--r--net/llc/llc_proc.c2
-rw-r--r--net/llc/llc_s_ac.c16
-rw-r--r--net/llc/llc_sap.c20
-rw-r--r--net/llc/llc_station.c25
-rw-r--r--net/llc/sysctl_net_llc.c131
-rw-r--r--net/netfilter/nfnetlink_log.c4
-rw-r--r--net/netfilter/nfnetlink_queue.c4
-rw-r--r--net/packet/af_packet.c10
-rw-r--r--net/rose/af_rose.c26
-rw-r--r--net/sched/em_meta.c6
-rw-r--r--net/sctp/protocol.c2
-rw-r--r--net/sctp/sm_statefuns.c22
-rw-r--r--net/socket.c17
-rw-r--r--net/sysctl_net.c2
-rw-r--r--scripts/mod/file2alias.c2
-rw-r--r--security/keys/internal.h26
-rw-r--r--security/keys/key.c81
-rw-r--r--security/keys/keyctl.c301
-rw-r--r--security/keys/keyring.c86
-rw-r--r--security/keys/proc.c2
-rw-r--r--security/keys/process_keys.c164
-rw-r--r--security/keys/request_key.c36
-rw-r--r--security/keys/request_key_auth.c2
-rw-r--r--security/selinux/hooks.c30
-rw-r--r--sound/oss/au1000.c2
-rw-r--r--sound/oss/ite8172.c2
-rw-r--r--sound/pci/atiixp_modem.c16
588 files changed, 22511 insertions, 6350 deletions
diff --git a/Documentation/Changes b/Documentation/Changes
index 5eaab0441d76..27232be26e1a 100644
--- a/Documentation/Changes
+++ b/Documentation/Changes
@@ -237,6 +237,12 @@ udev
237udev is a userspace application for populating /dev dynamically with 237udev is a userspace application for populating /dev dynamically with
238only entries for devices actually present. udev replaces devfs. 238only entries for devices actually present. udev replaces devfs.
239 239
240FUSE
241----
242
243Needs libfuse 2.4.0 or later. Absolute minimum is 2.3.0 but mount
244options 'direct_io' and 'kernel_cache' won't work.
245
240Networking 246Networking
241========== 247==========
242 248
@@ -390,6 +396,10 @@ udev
390---- 396----
391o <http://www.kernel.org/pub/linux/utils/kernel/hotplug/udev.html> 397o <http://www.kernel.org/pub/linux/utils/kernel/hotplug/udev.html>
392 398
399FUSE
400----
401o <http://sourceforge.net/projects/fuse>
402
393Networking 403Networking
394********** 404**********
395 405
diff --git a/Documentation/SubmittingPatches b/Documentation/SubmittingPatches
index 7f43b040311e..237d54c44bc5 100644
--- a/Documentation/SubmittingPatches
+++ b/Documentation/SubmittingPatches
@@ -301,8 +301,84 @@ now, but you can do this to mark internal company procedures or just
301point out some special detail about the sign-off. 301point out some special detail about the sign-off.
302 302
303 303
30412) The canonical patch format
304 305
30512) More references for submitting patches 306The canonical patch subject line is:
307
308 Subject: [PATCH 001/123] subsystem: summary phrase
309
310The canonical patch message body contains the following:
311
312 - A "from" line specifying the patch author.
313
314 - An empty line.
315
316 - The body of the explanation, which will be copied to the
317 permanent changelog to describe this patch.
318
319 - The "Signed-off-by:" lines, described above, which will
320 also go in the changelog.
321
322 - A marker line containing simply "---".
323
324 - Any additional comments not suitable for the changelog.
325
326 - The actual patch (diff output).
327
328The Subject line format makes it very easy to sort the emails
329alphabetically by subject line - pretty much any email reader will
330support that - since because the sequence number is zero-padded,
331the numerical and alphabetic sort is the same.
332
333The "subsystem" in the email's Subject should identify which
334area or subsystem of the kernel is being patched.
335
336The "summary phrase" in the email's Subject should concisely
337describe the patch which that email contains. The "summary
338phrase" should not be a filename. Do not use the same "summary
339phrase" for every patch in a whole patch series.
340
341Bear in mind that the "summary phrase" of your email becomes
342a globally-unique identifier for that patch. It propagates
343all the way into the git changelog. The "summary phrase" may
344later be used in developer discussions which refer to the patch.
345People will want to google for the "summary phrase" to read
346discussion regarding that patch.
347
348A couple of example Subjects:
349
350 Subject: [patch 2/5] ext2: improve scalability of bitmap searching
351 Subject: [PATCHv2 001/207] x86: fix eflags tracking
352
353The "from" line must be the very first line in the message body,
354and has the form:
355
356 From: Original Author <author@example.com>
357
358The "from" line specifies who will be credited as the author of the
359patch in the permanent changelog. If the "from" line is missing,
360then the "From:" line from the email header will be used to determine
361the patch author in the changelog.
362
363The explanation body will be committed to the permanent source
364changelog, so should make sense to a competent reader who has long
365since forgotten the immediate details of the discussion that might
366have led to this patch.
367
368The "---" marker line serves the essential purpose of marking for patch
369handling tools where the changelog message ends.
370
371One good use for the additional comments after the "---" marker is for
372a diffstat, to show what files have changed, and the number of inserted
373and deleted lines per file. A diffstat is especially useful on bigger
374patches. Other comments relevant only to the moment or the maintainer,
375not suitable for the permanent changelog, should also go here.
376
377See more details on the proper patch format in the following
378references.
379
380
38113) More references for submitting patches
306 382
307Andrew Morton, "The perfect patch" (tpp). 383Andrew Morton, "The perfect patch" (tpp).
308 <http://www.zip.com.au/~akpm/linux/patches/stuff/tpp.txt> 384 <http://www.zip.com.au/~akpm/linux/patches/stuff/tpp.txt>
@@ -310,6 +386,14 @@ Andrew Morton, "The perfect patch" (tpp).
310Jeff Garzik, "Linux kernel patch submission format." 386Jeff Garzik, "Linux kernel patch submission format."
311 <http://linux.yyz.us/patch-format.html> 387 <http://linux.yyz.us/patch-format.html>
312 388
389Greg KH, "How to piss off a kernel subsystem maintainer"
390 <http://www.kroah.com/log/2005/03/31/>
391
392Kernel Documentation/CodingStyle
393 <http://sosdg.org/~coywolf/lxr/source/Documentation/CodingStyle>
394
395Linus Torvald's mail on the canonical patch format:
396 <http://lkml.org/lkml/2005/4/7/183>
313 397
314 398
315----------------------------------- 399-----------------------------------
diff --git a/Documentation/device-mapper/snapshot.txt b/Documentation/device-mapper/snapshot.txt
new file mode 100644
index 000000000000..dca274ff4005
--- /dev/null
+++ b/Documentation/device-mapper/snapshot.txt
@@ -0,0 +1,73 @@
1Device-mapper snapshot support
2==============================
3
4Device-mapper allows you, without massive data copying:
5
6*) To create snapshots of any block device i.e. mountable, saved states of
7the block device which are also writable without interfering with the
8original content;
9*) To create device "forks", i.e. multiple different versions of the
10same data stream.
11
12
13In both cases, dm copies only the chunks of data that get changed and
14uses a separate copy-on-write (COW) block device for storage.
15
16
17There are two dm targets available: snapshot and snapshot-origin.
18
19*) snapshot-origin <origin>
20
21which will normally have one or more snapshots based on it.
22You must create the snapshot-origin device before you can create snapshots.
23Reads will be mapped directly to the backing device. For each write, the
24original data will be saved in the <COW device> of each snapshot to keep
25its visible content unchanged, at least until the <COW device> fills up.
26
27
28*) snapshot <origin> <COW device> <persistent?> <chunksize>
29
30A snapshot is created of the <origin> block device. Changed chunks of
31<chunksize> sectors will be stored on the <COW device>. Writes will
32only go to the <COW device>. Reads will come from the <COW device> or
33from <origin> for unchanged data. <COW device> will often be
34smaller than the origin and if it fills up the snapshot will become
35useless and be disabled, returning errors. So it is important to monitor
36the amount of free space and expand the <COW device> before it fills up.
37
38<persistent?> is P (Persistent) or N (Not persistent - will not survive
39after reboot).
40
41
42How this is used by LVM2
43========================
44When you create the first LVM2 snapshot of a volume, four dm devices are used:
45
461) a device containing the original mapping table of the source volume;
472) a device used as the <COW device>;
483) a "snapshot" device, combining #1 and #2, which is the visible snapshot
49 volume;
504) the "original" volume (which uses the device number used by the original
51 source volume), whose table is replaced by a "snapshot-origin" mapping
52 from device #1.
53
54A fixed naming scheme is used, so with the following commands:
55
56lvcreate -L 1G -n base volumeGroup
57lvcreate -L 100M --snapshot -n snap volumeGroup/base
58
59we'll have this situation (with volumes in above order):
60
61# dmsetup table|grep volumeGroup
62
63volumeGroup-base-real: 0 2097152 linear 8:19 384
64volumeGroup-snap-cow: 0 204800 linear 8:19 2097536
65volumeGroup-snap: 0 2097152 snapshot 254:11 254:12 P 16
66volumeGroup-base: 0 2097152 snapshot-origin 254:11
67
68# ls -lL /dev/mapper/volumeGroup-*
69brw------- 1 root root 254, 11 29 ago 18:15 /dev/mapper/volumeGroup-base-real
70brw------- 1 root root 254, 12 29 ago 18:15 /dev/mapper/volumeGroup-snap-cow
71brw------- 1 root root 254, 13 29 ago 18:15 /dev/mapper/volumeGroup-snap
72brw------- 1 root root 254, 10 29 ago 18:14 /dev/mapper/volumeGroup-base
73
diff --git a/Documentation/keys.txt b/Documentation/keys.txt
index 0321ded4b9ae..b22e7c8d059a 100644
--- a/Documentation/keys.txt
+++ b/Documentation/keys.txt
@@ -195,8 +195,8 @@ KEY ACCESS PERMISSIONS
195====================== 195======================
196 196
197Keys have an owner user ID, a group access ID, and a permissions mask. The mask 197Keys have an owner user ID, a group access ID, and a permissions mask. The mask
198has up to eight bits each for user, group and other access. Only five of each 198has up to eight bits each for possessor, user, group and other access. Only
199set of eight bits are defined. These permissions granted are: 199five of each set of eight bits are defined. These permissions granted are:
200 200
201 (*) View 201 (*) View
202 202
@@ -241,16 +241,16 @@ about the status of the key service:
241 type, description and permissions. The payload of the key is not available 241 type, description and permissions. The payload of the key is not available
242 this way: 242 this way:
243 243
244 SERIAL FLAGS USAGE EXPY PERM UID GID TYPE DESCRIPTION: SUMMARY 244 SERIAL FLAGS USAGE EXPY PERM UID GID TYPE DESCRIPTION: SUMMARY
245 00000001 I----- 39 perm 1f0000 0 0 keyring _uid_ses.0: 1/4 245 00000001 I----- 39 perm 1f1f0000 0 0 keyring _uid_ses.0: 1/4
246 00000002 I----- 2 perm 1f0000 0 0 keyring _uid.0: empty 246 00000002 I----- 2 perm 1f1f0000 0 0 keyring _uid.0: empty
247 00000007 I----- 1 perm 1f0000 0 0 keyring _pid.1: empty 247 00000007 I----- 1 perm 1f1f0000 0 0 keyring _pid.1: empty
248 0000018d I----- 1 perm 1f0000 0 0 keyring _pid.412: empty 248 0000018d I----- 1 perm 1f1f0000 0 0 keyring _pid.412: empty
249 000004d2 I--Q-- 1 perm 1f0000 32 -1 keyring _uid.32: 1/4 249 000004d2 I--Q-- 1 perm 1f1f0000 32 -1 keyring _uid.32: 1/4
250 000004d3 I--Q-- 3 perm 1f0000 32 -1 keyring _uid_ses.32: empty 250 000004d3 I--Q-- 3 perm 1f1f0000 32 -1 keyring _uid_ses.32: empty
251 00000892 I--QU- 1 perm 1f0000 0 0 user metal:copper: 0 251 00000892 I--QU- 1 perm 1f000000 0 0 user metal:copper: 0
252 00000893 I--Q-N 1 35s 1f0000 0 0 user metal:silver: 0 252 00000893 I--Q-N 1 35s 1f1f0000 0 0 user metal:silver: 0
253 00000894 I--Q-- 1 10h 1f0000 0 0 user metal:gold: 0 253 00000894 I--Q-- 1 10h 001f0000 0 0 user metal:gold: 0
254 254
255 The flags are: 255 The flags are:
256 256
@@ -637,6 +637,34 @@ call, and the key released upon close. How to deal with conflicting keys due to
637two different users opening the same file is left to the filesystem author to 637two different users opening the same file is left to the filesystem author to
638solve. 638solve.
639 639
640Note that there are two different types of pointers to keys that may be
641encountered:
642
643 (*) struct key *
644
645 This simply points to the key structure itself. Key structures will be at
646 least four-byte aligned.
647
648 (*) key_ref_t
649
650 This is equivalent to a struct key *, but the least significant bit is set
651 if the caller "possesses" the key. By "possession" it is meant that the
652 calling processes has a searchable link to the key from one of its
653 keyrings. There are three functions for dealing with these:
654
655 key_ref_t make_key_ref(const struct key *key,
656 unsigned long possession);
657
658 struct key *key_ref_to_ptr(const key_ref_t key_ref);
659
660 unsigned long is_key_possessed(const key_ref_t key_ref);
661
662 The first function constructs a key reference from a key pointer and
663 possession information (which must be 0 or 1 and not any other value).
664
665 The second function retrieves the key pointer from a reference and the
666 third retrieves the possession flag.
667
640When accessing a key's payload contents, certain precautions must be taken to 668When accessing a key's payload contents, certain precautions must be taken to
641prevent access vs modification races. See the section "Notes on accessing 669prevent access vs modification races. See the section "Notes on accessing
642payload contents" for more information. 670payload contents" for more information.
@@ -665,7 +693,11 @@ payload contents" for more information.
665 693
666 void key_put(struct key *key); 694 void key_put(struct key *key);
667 695
668 This can be called from interrupt context. If CONFIG_KEYS is not set then 696 Or:
697
698 void key_ref_put(key_ref_t key_ref);
699
700 These can be called from interrupt context. If CONFIG_KEYS is not set then
669 the argument will not be parsed. 701 the argument will not be parsed.
670 702
671 703
@@ -689,13 +721,17 @@ payload contents" for more information.
689 721
690(*) If a keyring was found in the search, this can be further searched by: 722(*) If a keyring was found in the search, this can be further searched by:
691 723
692 struct key *keyring_search(struct key *keyring, 724 key_ref_t keyring_search(key_ref_t keyring_ref,
693 const struct key_type *type, 725 const struct key_type *type,
694 const char *description) 726 const char *description)
695 727
696 This searches the keyring tree specified for a matching key. Error ENOKEY 728 This searches the keyring tree specified for a matching key. Error ENOKEY
697 is returned upon failure. If successful, the returned key will need to be 729 is returned upon failure (use IS_ERR/PTR_ERR to determine). If successful,
698 released. 730 the returned key will need to be released.
731
732 The possession attribute from the keyring reference is used to control
733 access through the permissions mask and is propagated to the returned key
734 reference pointer if successful.
699 735
700 736
701(*) To check the validity of a key, this function can be called: 737(*) To check the validity of a key, this function can be called:
@@ -732,7 +768,7 @@ More complex payload contents must be allocated and a pointer to them set in
732key->payload.data. One of the following ways must be selected to access the 768key->payload.data. One of the following ways must be selected to access the
733data: 769data:
734 770
735 (1) Unmodifyable key type. 771 (1) Unmodifiable key type.
736 772
737 If the key type does not have a modify method, then the key's payload can 773 If the key type does not have a modify method, then the key's payload can
738 be accessed without any form of locking, provided that it's known to be 774 be accessed without any form of locking, provided that it's known to be
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index ab65714d95fc..b433c8a27e2d 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -355,10 +355,14 @@ ip_dynaddr - BOOLEAN
355 Default: 0 355 Default: 0
356 356
357icmp_echo_ignore_all - BOOLEAN 357icmp_echo_ignore_all - BOOLEAN
358 If set non-zero, then the kernel will ignore all ICMP ECHO
359 requests sent to it.
360 Default: 0
361
358icmp_echo_ignore_broadcasts - BOOLEAN 362icmp_echo_ignore_broadcasts - BOOLEAN
359 If either is set to true, then the kernel will ignore either all 363 If set non-zero, then the kernel will ignore all ICMP ECHO and
360 ICMP ECHO requests sent to it or just those to broadcast/multicast 364 TIMESTAMP requests sent to it via broadcast/multicast.
361 addresses, respectively. 365 Default: 1
362 366
363icmp_ratelimit - INTEGER 367icmp_ratelimit - INTEGER
364 Limit the maximal rates for sending ICMP packets whose type matches 368 Limit the maximal rates for sending ICMP packets whose type matches
diff --git a/Documentation/sparse.txt b/Documentation/sparse.txt
index 5df44dc894e5..1829009db771 100644
--- a/Documentation/sparse.txt
+++ b/Documentation/sparse.txt
@@ -51,9 +51,9 @@ or you don't get any checking at all.
51Where to get sparse 51Where to get sparse
52~~~~~~~~~~~~~~~~~~~ 52~~~~~~~~~~~~~~~~~~~
53 53
54With BK, you can just get it from 54With git, you can just get it from
55 55
56 bk://sparse.bkbits.net/sparse 56 rsync://rsync.kernel.org/pub/scm/devel/sparse/sparse.git
57 57
58and DaveJ has tar-balls at 58and DaveJ has tar-balls at
59 59
diff --git a/MAINTAINERS b/MAINTAINERS
index 78aca12101a0..abf7f7a17ae0 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -604,6 +604,15 @@ P: H. Peter Anvin
604M: hpa@zytor.com 604M: hpa@zytor.com
605S: Maintained 605S: Maintained
606 606
607CPUSETS
608P: Paul Jackson
609P: Simon Derr
610M: pj@sgi.com
611M: simon.derr@bull.net
612L: linux-kernel@vger.kernel.org
613W: http://www.bullopensource.org/cpuset/
614S: Supported
615
607CRAMFS FILESYSTEM 616CRAMFS FILESYSTEM
608W: http://sourceforge.net/projects/cramfs/ 617W: http://sourceforge.net/projects/cramfs/
609S: Orphan 618S: Orphan
@@ -1159,11 +1168,6 @@ L: linux1394-devel@lists.sourceforge.net
1159W: http://www.linux1394.org/ 1168W: http://www.linux1394.org/
1160S: Orphan 1169S: Orphan
1161 1170
1162IEEE 1394 SBP2
1163L: linux1394-devel@lists.sourceforge.net
1164W: http://www.linux1394.org/
1165S: Orphan
1166
1167IEEE 1394 SUBSYSTEM 1171IEEE 1394 SUBSYSTEM
1168P: Ben Collins 1172P: Ben Collins
1169M: bcollins@debian.org 1173M: bcollins@debian.org
@@ -1198,6 +1202,15 @@ L: linux1394-devel@lists.sourceforge.net
1198W: http://www.linux1394.org/ 1202W: http://www.linux1394.org/
1199S: Maintained 1203S: Maintained
1200 1204
1205IEEE 1394 SBP2
1206P: Ben Collins
1207M: bcollins@debian.org
1208P: Stefan Richter
1209M: stefanr@s5r6.in-berlin.de
1210L: linux1394-devel@lists.sourceforge.net
1211W: http://www.linux1394.org/
1212S: Maintained
1213
1201IMS TWINTURBO FRAMEBUFFER DRIVER 1214IMS TWINTURBO FRAMEBUFFER DRIVER
1202P: Paul Mundt 1215P: Paul Mundt
1203M: lethal@chaoticdreams.org 1216M: lethal@chaoticdreams.org
@@ -1402,6 +1415,18 @@ L: linux-kernel@vger.kernel.org
1402L: fastboot@osdl.org 1415L: fastboot@osdl.org
1403S: Maintained 1416S: Maintained
1404 1417
1418KPROBES
1419P: Prasanna S Panchamukhi
1420M: prasanna@in.ibm.com
1421P: Ananth N Mavinakayanahalli
1422M: ananth@in.ibm.com
1423P: Anil S Keshavamurthy
1424M: anil.s.keshavamurthy@intel.com
1425P: David S. Miller
1426M: davem@davemloft.net
1427L: linux-kernel@vger.kernel.org
1428S: Maintained
1429
1405LANMEDIA WAN CARD DRIVER 1430LANMEDIA WAN CARD DRIVER
1406P: Andrew Stanley-Jones 1431P: Andrew Stanley-Jones
1407M: asj@lanmedia.com 1432M: asj@lanmedia.com
@@ -1722,8 +1747,11 @@ S: Maintained
1722IPVS 1747IPVS
1723P: Wensong Zhang 1748P: Wensong Zhang
1724M: wensong@linux-vs.org 1749M: wensong@linux-vs.org
1750P: Simon Horman
1751M: horms@verge.net.au
1725P: Julian Anastasov 1752P: Julian Anastasov
1726M: ja@ssi.bg 1753M: ja@ssi.bg
1754L: netdev@vger.kernel.org
1727S: Maintained 1755S: Maintained
1728 1756
1729NFS CLIENT 1757NFS CLIENT
@@ -1894,6 +1922,13 @@ M: joern@wh.fh-wedel.de
1894L: linux-mtd@lists.infradead.org 1922L: linux-mtd@lists.infradead.org
1895S: Maintained 1923S: Maintained
1896 1924
1925PKTCDVD DRIVER
1926P: Peter Osterlund
1927M: petero2@telia.com
1928L: linux-kernel@vger.kernel.org
1929L: packet-writing@suse.com
1930S: Maintained
1931
1897POSIX CLOCKS and TIMERS 1932POSIX CLOCKS and TIMERS
1898P: George Anzinger 1933P: George Anzinger
1899M: george@mvista.com 1934M: george@mvista.com
diff --git a/Makefile b/Makefile
index 8cf6becf68dc..fdb96bc85080 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
1VERSION = 2 1VERSION = 2
2PATCHLEVEL = 6 2PATCHLEVEL = 6
3SUBLEVEL = 14 3SUBLEVEL = 14
4EXTRAVERSION =-rc2 4EXTRAVERSION =-rc3
5NAME=Affluent Albatross 5NAME=Affluent Albatross
6 6
7# *DOCUMENTATION* 7# *DOCUMENTATION*
diff --git a/README b/README
index 2b5844d8cfa0..d1edcc7adabe 100644
--- a/README
+++ b/README
@@ -151,7 +151,7 @@ CONFIGURING the kernel:
151 your existing ./.config file. 151 your existing ./.config file.
152 "make silentoldconfig" 152 "make silentoldconfig"
153 Like above, but avoids cluttering the screen 153 Like above, but avoids cluttering the screen
154 with question already answered. 154 with questions already answered.
155 155
156 NOTES on "make config": 156 NOTES on "make config":
157 - having unnecessary drivers will make the kernel bigger, and can 157 - having unnecessary drivers will make the kernel bigger, and can
@@ -199,9 +199,9 @@ COMPILING the kernel:
199 are installing a new kernel with the same version number as your 199 are installing a new kernel with the same version number as your
200 working kernel, make a backup of your modules directory before you 200 working kernel, make a backup of your modules directory before you
201 do a "make modules_install". 201 do a "make modules_install".
202 In alternative, before compiling, edit your Makefile and change the 202 Alternatively, before compiling, use the kernel config option
203 "EXTRAVERSION" line - its content is appended to the regular kernel 203 "LOCALVERSION" to append a unique suffix to the regular kernel version.
204 version. 204 LOCALVERSION can be set in the "General Setup" menu.
205 205
206 - In order to boot your new kernel, you'll need to copy the kernel 206 - In order to boot your new kernel, you'll need to copy the kernel
207 image (e.g. .../linux/arch/i386/boot/bzImage after compilation) 207 image (e.g. .../linux/arch/i386/boot/bzImage after compilation)
diff --git a/arch/alpha/kernel/entry.S b/arch/alpha/kernel/entry.S
index 76cc0cb5fc2e..e38671c922bc 100644
--- a/arch/alpha/kernel/entry.S
+++ b/arch/alpha/kernel/entry.S
@@ -196,6 +196,7 @@ entUna:
196 stq $26, 208($sp) 196 stq $26, 208($sp)
197 stq $27, 216($sp) 197 stq $27, 216($sp)
198 stq $28, 224($sp) 198 stq $28, 224($sp)
199 mov $sp, $19
199 stq $gp, 232($sp) 200 stq $gp, 232($sp)
200 lda $8, 0x3fff 201 lda $8, 0x3fff
201 stq $31, 248($sp) 202 stq $31, 248($sp)
diff --git a/arch/alpha/kernel/process.c b/arch/alpha/kernel/process.c
index fa98dae3cd98..eb20c3afff58 100644
--- a/arch/alpha/kernel/process.c
+++ b/arch/alpha/kernel/process.c
@@ -127,6 +127,10 @@ common_shutdown_1(void *generic_ptr)
127 /* If booted from SRM, reset some of the original environment. */ 127 /* If booted from SRM, reset some of the original environment. */
128 if (alpha_using_srm) { 128 if (alpha_using_srm) {
129#ifdef CONFIG_DUMMY_CONSOLE 129#ifdef CONFIG_DUMMY_CONSOLE
130 /* If we've gotten here after SysRq-b, leave interrupt
131 context before taking over the console. */
132 if (in_interrupt())
133 irq_exit();
130 /* This has the effect of resetting the VGA video origin. */ 134 /* This has the effect of resetting the VGA video origin. */
131 take_over_console(&dummy_con, 0, MAX_NR_CONSOLES-1, 1); 135 take_over_console(&dummy_con, 0, MAX_NR_CONSOLES-1, 1);
132#endif 136#endif
diff --git a/arch/alpha/kernel/traps.c b/arch/alpha/kernel/traps.c
index 6f509a644bdd..f9d12319e0fb 100644
--- a/arch/alpha/kernel/traps.c
+++ b/arch/alpha/kernel/traps.c
@@ -446,16 +446,15 @@ struct unaligned_stat {
446 446
447 447
448/* Macro for exception fixup code to access integer registers. */ 448/* Macro for exception fixup code to access integer registers. */
449#define una_reg(r) (regs.regs[(r) >= 16 && (r) <= 18 ? (r)+19 : (r)]) 449#define una_reg(r) (regs->regs[(r) >= 16 && (r) <= 18 ? (r)+19 : (r)])
450 450
451 451
452asmlinkage void 452asmlinkage void
453do_entUna(void * va, unsigned long opcode, unsigned long reg, 453do_entUna(void * va, unsigned long opcode, unsigned long reg,
454 unsigned long a3, unsigned long a4, unsigned long a5, 454 struct allregs *regs)
455 struct allregs regs)
456{ 455{
457 long error, tmp1, tmp2, tmp3, tmp4; 456 long error, tmp1, tmp2, tmp3, tmp4;
458 unsigned long pc = regs.pc - 4; 457 unsigned long pc = regs->pc - 4;
459 const struct exception_table_entry *fixup; 458 const struct exception_table_entry *fixup;
460 459
461 unaligned[0].count++; 460 unaligned[0].count++;
@@ -636,7 +635,7 @@ got_exception:
636 printk("Forwarding unaligned exception at %lx (%lx)\n", 635 printk("Forwarding unaligned exception at %lx (%lx)\n",
637 pc, newpc); 636 pc, newpc);
638 637
639 (&regs)->pc = newpc; 638 regs->pc = newpc;
640 return; 639 return;
641 } 640 }
642 641
@@ -650,7 +649,7 @@ got_exception:
650 current->comm, current->pid); 649 current->comm, current->pid);
651 650
652 printk("pc = [<%016lx>] ra = [<%016lx>] ps = %04lx\n", 651 printk("pc = [<%016lx>] ra = [<%016lx>] ps = %04lx\n",
653 pc, una_reg(26), regs.ps); 652 pc, una_reg(26), regs->ps);
654 printk("r0 = %016lx r1 = %016lx r2 = %016lx\n", 653 printk("r0 = %016lx r1 = %016lx r2 = %016lx\n",
655 una_reg(0), una_reg(1), una_reg(2)); 654 una_reg(0), una_reg(1), una_reg(2));
656 printk("r3 = %016lx r4 = %016lx r5 = %016lx\n", 655 printk("r3 = %016lx r4 = %016lx r5 = %016lx\n",
@@ -670,10 +669,10 @@ got_exception:
670 una_reg(22), una_reg(23), una_reg(24)); 669 una_reg(22), una_reg(23), una_reg(24));
671 printk("r25= %016lx r27= %016lx r28= %016lx\n", 670 printk("r25= %016lx r27= %016lx r28= %016lx\n",
672 una_reg(25), una_reg(27), una_reg(28)); 671 una_reg(25), una_reg(27), una_reg(28));
673 printk("gp = %016lx sp = %p\n", regs.gp, &regs+1); 672 printk("gp = %016lx sp = %p\n", regs->gp, regs+1);
674 673
675 dik_show_code((unsigned int *)pc); 674 dik_show_code((unsigned int *)pc);
676 dik_show_trace((unsigned long *)(&regs+1)); 675 dik_show_trace((unsigned long *)(regs+1));
677 676
678 if (test_and_set_thread_flag (TIF_DIE_IF_KERNEL)) { 677 if (test_and_set_thread_flag (TIF_DIE_IF_KERNEL)) {
679 printk("die_if_kernel recursion detected.\n"); 678 printk("die_if_kernel recursion detected.\n");
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 130e6228b587..7779f2d1acad 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -175,10 +175,10 @@ else
175endif 175endif
176 @touch $@ 176 @touch $@
177 177
178archprepare: maketools include/asm-arm/.arch 178archprepare: maketools
179 179
180.PHONY: maketools FORCE 180.PHONY: maketools FORCE
181maketools: include/linux/version.h FORCE 181maketools: include/linux/version.h include/asm-arm/.arch FORCE
182 $(Q)$(MAKE) $(build)=arch/arm/tools include/asm-arm/mach-types.h 182 $(Q)$(MAKE) $(build)=arch/arm/tools include/asm-arm/mach-types.h
183 183
184# Convert bzImage to zImage 184# Convert bzImage to zImage
diff --git a/arch/arm/common/gic.c b/arch/arm/common/gic.c
index d74990717559..c02dc8116a18 100644
--- a/arch/arm/common/gic.c
+++ b/arch/arm/common/gic.c
@@ -68,6 +68,7 @@ static void gic_unmask_irq(unsigned int irq)
68 writel(mask, gic_dist_base + GIC_DIST_ENABLE_SET + (irq / 32) * 4); 68 writel(mask, gic_dist_base + GIC_DIST_ENABLE_SET + (irq / 32) * 4);
69} 69}
70 70
71#ifdef CONFIG_SMP
71static void gic_set_cpu(struct irqdesc *desc, unsigned int irq, unsigned int cpu) 72static void gic_set_cpu(struct irqdesc *desc, unsigned int irq, unsigned int cpu)
72{ 73{
73 void __iomem *reg = gic_dist_base + GIC_DIST_TARGET + (irq & ~3); 74 void __iomem *reg = gic_dist_base + GIC_DIST_TARGET + (irq & ~3);
@@ -78,6 +79,7 @@ static void gic_set_cpu(struct irqdesc *desc, unsigned int irq, unsigned int cpu
78 val |= 1 << (cpu + shift); 79 val |= 1 << (cpu + shift);
79 writel(val, reg); 80 writel(val, reg);
80} 81}
82#endif
81 83
82static struct irqchip gic_chip = { 84static struct irqchip gic_chip = {
83 .ack = gic_ack_irq, 85 .ack = gic_ack_irq,
diff --git a/arch/arm/common/locomo.c b/arch/arm/common/locomo.c
index a7bd85700152..e8053d16829b 100644
--- a/arch/arm/common/locomo.c
+++ b/arch/arm/common/locomo.c
@@ -27,7 +27,6 @@
27#include <linux/spinlock.h> 27#include <linux/spinlock.h>
28 28
29#include <asm/hardware.h> 29#include <asm/hardware.h>
30#include <asm/mach-types.h>
31#include <asm/io.h> 30#include <asm/io.h>
32#include <asm/irq.h> 31#include <asm/irq.h>
33#include <asm/mach/irq.h> 32#include <asm/mach/irq.h>
diff --git a/arch/arm/configs/ixp4xx_defconfig b/arch/arm/configs/ixp4xx_defconfig
index 94aafec5fb46..c279e41ed10e 100644
--- a/arch/arm/configs/ixp4xx_defconfig
+++ b/arch/arm/configs/ixp4xx_defconfig
@@ -1,14 +1,13 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.12-rc1-bk2 3# Linux kernel version: 2.6.14-rc1-git5
4# Sun Mar 27 22:53:40 2005 4# Tue Sep 20 17:26:28 2005
5# 5#
6CONFIG_ARM=y 6CONFIG_ARM=y
7CONFIG_MMU=y 7CONFIG_MMU=y
8CONFIG_UID16=y 8CONFIG_UID16=y
9CONFIG_RWSEM_GENERIC_SPINLOCK=y 9CONFIG_RWSEM_GENERIC_SPINLOCK=y
10CONFIG_GENERIC_CALIBRATE_DELAY=y 10CONFIG_GENERIC_CALIBRATE_DELAY=y
11CONFIG_GENERIC_IOMAP=y
12 11
13# 12#
14# Code maturity level options 13# Code maturity level options
@@ -16,11 +15,13 @@ CONFIG_GENERIC_IOMAP=y
16CONFIG_EXPERIMENTAL=y 15CONFIG_EXPERIMENTAL=y
17CONFIG_CLEAN_COMPILE=y 16CONFIG_CLEAN_COMPILE=y
18CONFIG_BROKEN_ON_SMP=y 17CONFIG_BROKEN_ON_SMP=y
18CONFIG_INIT_ENV_ARG_LIMIT=32
19 19
20# 20#
21# General setup 21# General setup
22# 22#
23CONFIG_LOCALVERSION="" 23CONFIG_LOCALVERSION=""
24CONFIG_LOCALVERSION_AUTO=y
24CONFIG_SWAP=y 25CONFIG_SWAP=y
25CONFIG_SYSVIPC=y 26CONFIG_SYSVIPC=y
26# CONFIG_POSIX_MQUEUE is not set 27# CONFIG_POSIX_MQUEUE is not set
@@ -31,10 +32,13 @@ CONFIG_SYSCTL=y
31# CONFIG_HOTPLUG is not set 32# CONFIG_HOTPLUG is not set
32CONFIG_KOBJECT_UEVENT=y 33CONFIG_KOBJECT_UEVENT=y
33# CONFIG_IKCONFIG is not set 34# CONFIG_IKCONFIG is not set
35CONFIG_INITRAMFS_SOURCE=""
34CONFIG_EMBEDDED=y 36CONFIG_EMBEDDED=y
35CONFIG_KALLSYMS=y 37CONFIG_KALLSYMS=y
36# CONFIG_KALLSYMS_ALL is not set 38# CONFIG_KALLSYMS_ALL is not set
37# CONFIG_KALLSYMS_EXTRA_PASS is not set 39# CONFIG_KALLSYMS_EXTRA_PASS is not set
40CONFIG_PRINTK=y
41CONFIG_BUG=y
38CONFIG_BASE_FULL=y 42CONFIG_BASE_FULL=y
39CONFIG_FUTEX=y 43CONFIG_FUTEX=y
40CONFIG_EPOLL=y 44CONFIG_EPOLL=y
@@ -81,6 +85,7 @@ CONFIG_ARCH_IXP4XX=y
81# CONFIG_ARCH_VERSATILE is not set 85# CONFIG_ARCH_VERSATILE is not set
82# CONFIG_ARCH_IMX is not set 86# CONFIG_ARCH_IMX is not set
83# CONFIG_ARCH_H720X is not set 87# CONFIG_ARCH_H720X is not set
88# CONFIG_ARCH_AAEC2000 is not set
84CONFIG_ARCH_SUPPORTS_BIG_ENDIAN=y 89CONFIG_ARCH_SUPPORTS_BIG_ENDIAN=y
85 90
86# 91#
@@ -90,15 +95,16 @@ CONFIG_ARCH_SUPPORTS_BIG_ENDIAN=y
90# 95#
91# IXP4xx Platforms 96# IXP4xx Platforms
92# 97#
93# CONFIG_ARCH_AVILA is not set 98CONFIG_ARCH_AVILA=y
94CONFIG_ARCH_ADI_COYOTE=y 99CONFIG_ARCH_ADI_COYOTE=y
95CONFIG_ARCH_IXDP425=y 100CONFIG_ARCH_IXDP425=y
96# CONFIG_MACH_IXDPG425 is not set 101CONFIG_MACH_IXDPG425=y
97# CONFIG_MACH_IXDP465 is not set 102CONFIG_MACH_IXDP465=y
98CONFIG_ARCH_IXCDP1100=y 103CONFIG_ARCH_IXCDP1100=y
99CONFIG_ARCH_PRPMC1100=y 104CONFIG_ARCH_PRPMC1100=y
100CONFIG_ARCH_IXDP4XX=y 105CONFIG_ARCH_IXDP4XX=y
101# CONFIG_MACH_GTWX5715 is not set 106CONFIG_CPU_IXP46X=y
107CONFIG_MACH_GTWX5715=y
102 108
103# 109#
104# IXP4xx Options 110# IXP4xx Options
@@ -114,7 +120,6 @@ CONFIG_CPU_32v5=y
114CONFIG_CPU_ABRT_EV5T=y 120CONFIG_CPU_ABRT_EV5T=y
115CONFIG_CPU_CACHE_VIVT=y 121CONFIG_CPU_CACHE_VIVT=y
116CONFIG_CPU_TLB_V4WBI=y 122CONFIG_CPU_TLB_V4WBI=y
117CONFIG_CPU_MINICACHE=y
118 123
119# 124#
120# Processor Features 125# Processor Features
@@ -127,9 +132,10 @@ CONFIG_DMABOUNCE=y
127# 132#
128# Bus support 133# Bus support
129# 134#
135CONFIG_ISA_DMA_API=y
130CONFIG_PCI=y 136CONFIG_PCI=y
131CONFIG_PCI_LEGACY_PROC=y 137CONFIG_PCI_LEGACY_PROC=y
132CONFIG_PCI_NAMES=y 138# CONFIG_PCI_DEBUG is not set
133 139
134# 140#
135# PCCARD (PCMCIA/CardBus) support 141# PCCARD (PCMCIA/CardBus) support
@@ -140,6 +146,15 @@ CONFIG_PCI_NAMES=y
140# Kernel Features 146# Kernel Features
141# 147#
142# CONFIG_PREEMPT is not set 148# CONFIG_PREEMPT is not set
149# CONFIG_NO_IDLE_HZ is not set
150# CONFIG_ARCH_DISCONTIGMEM_ENABLE is not set
151CONFIG_SELECT_MEMORY_MODEL=y
152CONFIG_FLATMEM_MANUAL=y
153# CONFIG_DISCONTIGMEM_MANUAL is not set
154# CONFIG_SPARSEMEM_MANUAL is not set
155CONFIG_FLATMEM=y
156CONFIG_FLAT_NODE_MEM_MAP=y
157# CONFIG_SPARSEMEM_STATIC is not set
143CONFIG_ALIGNMENT_TRAP=y 158CONFIG_ALIGNMENT_TRAP=y
144 159
145# 160#
@@ -175,6 +190,241 @@ CONFIG_PM=y
175CONFIG_APM=y 190CONFIG_APM=y
176 191
177# 192#
193# Networking
194#
195CONFIG_NET=y
196
197#
198# Networking options
199#
200CONFIG_PACKET=m
201CONFIG_PACKET_MMAP=y
202CONFIG_UNIX=y
203CONFIG_XFRM=y
204# CONFIG_XFRM_USER is not set
205# CONFIG_NET_KEY is not set
206CONFIG_INET=y
207CONFIG_IP_MULTICAST=y
208CONFIG_IP_ADVANCED_ROUTER=y
209CONFIG_ASK_IP_FIB_HASH=y
210# CONFIG_IP_FIB_TRIE is not set
211CONFIG_IP_FIB_HASH=y
212CONFIG_IP_MULTIPLE_TABLES=y
213CONFIG_IP_ROUTE_FWMARK=y
214CONFIG_IP_ROUTE_MULTIPATH=y
215# CONFIG_IP_ROUTE_MULTIPATH_CACHED is not set
216CONFIG_IP_ROUTE_VERBOSE=y
217CONFIG_IP_PNP=y
218CONFIG_IP_PNP_DHCP=y
219CONFIG_IP_PNP_BOOTP=y
220# CONFIG_IP_PNP_RARP is not set
221# CONFIG_NET_IPIP is not set
222CONFIG_NET_IPGRE=m
223CONFIG_NET_IPGRE_BROADCAST=y
224CONFIG_IP_MROUTE=y
225CONFIG_IP_PIMSM_V1=y
226CONFIG_IP_PIMSM_V2=y
227# CONFIG_ARPD is not set
228CONFIG_SYN_COOKIES=y
229# CONFIG_INET_AH is not set
230# CONFIG_INET_ESP is not set
231# CONFIG_INET_IPCOMP is not set
232CONFIG_INET_TUNNEL=m
233CONFIG_INET_DIAG=y
234CONFIG_INET_TCP_DIAG=y
235# CONFIG_TCP_CONG_ADVANCED is not set
236CONFIG_TCP_CONG_BIC=y
237
238#
239# IP: Virtual Server Configuration
240#
241CONFIG_IP_VS=m
242CONFIG_IP_VS_DEBUG=y
243CONFIG_IP_VS_TAB_BITS=12
244
245#
246# IPVS transport protocol load balancing support
247#
248# CONFIG_IP_VS_PROTO_TCP is not set
249# CONFIG_IP_VS_PROTO_UDP is not set
250# CONFIG_IP_VS_PROTO_ESP is not set
251# CONFIG_IP_VS_PROTO_AH is not set
252
253#
254# IPVS scheduler
255#
256CONFIG_IP_VS_RR=m
257CONFIG_IP_VS_WRR=m
258CONFIG_IP_VS_LC=m
259CONFIG_IP_VS_WLC=m
260CONFIG_IP_VS_LBLC=m
261CONFIG_IP_VS_LBLCR=m
262CONFIG_IP_VS_DH=m
263CONFIG_IP_VS_SH=m
264# CONFIG_IP_VS_SED is not set
265# CONFIG_IP_VS_NQ is not set
266
267#
268# IPVS application helper
269#
270# CONFIG_IPV6 is not set
271CONFIG_NETFILTER=y
272# CONFIG_NETFILTER_DEBUG is not set
273CONFIG_BRIDGE_NETFILTER=y
274# CONFIG_NETFILTER_NETLINK is not set
275
276#
277# IP: Netfilter Configuration
278#
279CONFIG_IP_NF_CONNTRACK=m
280# CONFIG_IP_NF_CT_ACCT is not set
281# CONFIG_IP_NF_CONNTRACK_MARK is not set
282# CONFIG_IP_NF_CONNTRACK_EVENTS is not set
283# CONFIG_IP_NF_CT_PROTO_SCTP is not set
284CONFIG_IP_NF_FTP=m
285CONFIG_IP_NF_IRC=m
286# CONFIG_IP_NF_NETBIOS_NS is not set
287# CONFIG_IP_NF_TFTP is not set
288# CONFIG_IP_NF_AMANDA is not set
289CONFIG_IP_NF_QUEUE=m
290CONFIG_IP_NF_IPTABLES=m
291CONFIG_IP_NF_MATCH_LIMIT=m
292# CONFIG_IP_NF_MATCH_IPRANGE is not set
293CONFIG_IP_NF_MATCH_MAC=m
294# CONFIG_IP_NF_MATCH_PKTTYPE is not set
295CONFIG_IP_NF_MATCH_MARK=m
296CONFIG_IP_NF_MATCH_MULTIPORT=m
297CONFIG_IP_NF_MATCH_TOS=m
298# CONFIG_IP_NF_MATCH_RECENT is not set
299# CONFIG_IP_NF_MATCH_ECN is not set
300# CONFIG_IP_NF_MATCH_DSCP is not set
301CONFIG_IP_NF_MATCH_AH_ESP=m
302CONFIG_IP_NF_MATCH_LENGTH=m
303CONFIG_IP_NF_MATCH_TTL=m
304CONFIG_IP_NF_MATCH_TCPMSS=m
305# CONFIG_IP_NF_MATCH_HELPER is not set
306CONFIG_IP_NF_MATCH_STATE=m
307# CONFIG_IP_NF_MATCH_CONNTRACK is not set
308CONFIG_IP_NF_MATCH_OWNER=m
309# CONFIG_IP_NF_MATCH_PHYSDEV is not set
310# CONFIG_IP_NF_MATCH_ADDRTYPE is not set
311# CONFIG_IP_NF_MATCH_REALM is not set
312# CONFIG_IP_NF_MATCH_SCTP is not set
313# CONFIG_IP_NF_MATCH_DCCP is not set
314# CONFIG_IP_NF_MATCH_COMMENT is not set
315# CONFIG_IP_NF_MATCH_HASHLIMIT is not set
316# CONFIG_IP_NF_MATCH_STRING is not set
317CONFIG_IP_NF_FILTER=m
318CONFIG_IP_NF_TARGET_REJECT=m
319CONFIG_IP_NF_TARGET_LOG=m
320CONFIG_IP_NF_TARGET_ULOG=m
321CONFIG_IP_NF_TARGET_TCPMSS=m
322CONFIG_IP_NF_NAT=m
323CONFIG_IP_NF_NAT_NEEDED=y
324CONFIG_IP_NF_TARGET_MASQUERADE=m
325CONFIG_IP_NF_TARGET_REDIRECT=m
326# CONFIG_IP_NF_TARGET_NETMAP is not set
327# CONFIG_IP_NF_TARGET_SAME is not set
328CONFIG_IP_NF_NAT_SNMP_BASIC=m
329CONFIG_IP_NF_NAT_IRC=m
330CONFIG_IP_NF_NAT_FTP=m
331CONFIG_IP_NF_MANGLE=m
332CONFIG_IP_NF_TARGET_TOS=m
333# CONFIG_IP_NF_TARGET_ECN is not set
334# CONFIG_IP_NF_TARGET_DSCP is not set
335CONFIG_IP_NF_TARGET_MARK=m
336# CONFIG_IP_NF_TARGET_CLASSIFY is not set
337# CONFIG_IP_NF_TARGET_TTL is not set
338# CONFIG_IP_NF_RAW is not set
339CONFIG_IP_NF_ARPTABLES=m
340CONFIG_IP_NF_ARPFILTER=m
341# CONFIG_IP_NF_ARP_MANGLE is not set
342
343#
344# Bridge: Netfilter Configuration
345#
346# CONFIG_BRIDGE_NF_EBTABLES is not set
347
348#
349# DCCP Configuration (EXPERIMENTAL)
350#
351# CONFIG_IP_DCCP is not set
352
353#
354# SCTP Configuration (EXPERIMENTAL)
355#
356# CONFIG_IP_SCTP is not set
357CONFIG_ATM=y
358CONFIG_ATM_CLIP=y
359# CONFIG_ATM_CLIP_NO_ICMP is not set
360CONFIG_ATM_LANE=m
361CONFIG_ATM_MPOA=m
362CONFIG_ATM_BR2684=m
363# CONFIG_ATM_BR2684_IPFILTER is not set
364CONFIG_BRIDGE=m
365CONFIG_VLAN_8021Q=m
366# CONFIG_DECNET is not set
367CONFIG_LLC=m
368# CONFIG_LLC2 is not set
369CONFIG_IPX=m
370# CONFIG_IPX_INTERN is not set
371CONFIG_ATALK=m
372CONFIG_DEV_APPLETALK=y
373CONFIG_IPDDP=m
374CONFIG_IPDDP_ENCAP=y
375CONFIG_IPDDP_DECAP=y
376CONFIG_X25=m
377CONFIG_LAPB=m
378# CONFIG_NET_DIVERT is not set
379CONFIG_ECONET=m
380CONFIG_ECONET_AUNUDP=y
381CONFIG_ECONET_NATIVE=y
382CONFIG_WAN_ROUTER=m
383CONFIG_NET_SCHED=y
384CONFIG_NET_SCH_CLK_JIFFIES=y
385# CONFIG_NET_SCH_CLK_GETTIMEOFDAY is not set
386# CONFIG_NET_SCH_CLK_CPU is not set
387CONFIG_NET_SCH_CBQ=m
388CONFIG_NET_SCH_HTB=m
389# CONFIG_NET_SCH_HFSC is not set
390# CONFIG_NET_SCH_ATM is not set
391CONFIG_NET_SCH_PRIO=m
392CONFIG_NET_SCH_RED=m
393CONFIG_NET_SCH_SFQ=m
394CONFIG_NET_SCH_TEQL=m
395CONFIG_NET_SCH_TBF=m
396CONFIG_NET_SCH_GRED=m
397CONFIG_NET_SCH_DSMARK=m
398# CONFIG_NET_SCH_NETEM is not set
399CONFIG_NET_SCH_INGRESS=m
400CONFIG_NET_QOS=y
401CONFIG_NET_ESTIMATOR=y
402CONFIG_NET_CLS=y
403# CONFIG_NET_CLS_BASIC is not set
404CONFIG_NET_CLS_TCINDEX=m
405CONFIG_NET_CLS_ROUTE4=m
406CONFIG_NET_CLS_ROUTE=y
407CONFIG_NET_CLS_FW=m
408CONFIG_NET_CLS_U32=m
409# CONFIG_CLS_U32_PERF is not set
410# CONFIG_NET_CLS_IND is not set
411# CONFIG_CLS_U32_MARK is not set
412CONFIG_NET_CLS_RSVP=m
413CONFIG_NET_CLS_RSVP6=m
414# CONFIG_NET_EMATCH is not set
415# CONFIG_NET_CLS_ACT is not set
416CONFIG_NET_CLS_POLICE=y
417
418#
419# Network testing
420#
421CONFIG_NET_PKTGEN=m
422# CONFIG_HAMRADIO is not set
423# CONFIG_IRDA is not set
424# CONFIG_BT is not set
425# CONFIG_IEEE80211 is not set
426
427#
178# Device Drivers 428# Device Drivers
179# 429#
180 430
@@ -244,6 +494,7 @@ CONFIG_MTD_COMPLEX_MAPPINGS=y
244CONFIG_MTD_IXP4XX=y 494CONFIG_MTD_IXP4XX=y
245# CONFIG_MTD_EDB7312 is not set 495# CONFIG_MTD_EDB7312 is not set
246# CONFIG_MTD_PCI is not set 496# CONFIG_MTD_PCI is not set
497# CONFIG_MTD_PLATRAM is not set
247 498
248# 499#
249# Self-contained MTD device drivers 500# Self-contained MTD device drivers
@@ -283,7 +534,6 @@ CONFIG_MTD_NAND_IDS=m
283# 534#
284# Block devices 535# Block devices
285# 536#
286# CONFIG_BLK_DEV_FD is not set
287# CONFIG_BLK_CPQ_DA is not set 537# CONFIG_BLK_CPQ_DA is not set
288# CONFIG_BLK_CPQ_CISS_DA is not set 538# CONFIG_BLK_CPQ_CISS_DA is not set
289# CONFIG_BLK_DEV_DAC960 is not set 539# CONFIG_BLK_DEV_DAC960 is not set
@@ -297,7 +547,6 @@ CONFIG_BLK_DEV_RAM=y
297CONFIG_BLK_DEV_RAM_COUNT=16 547CONFIG_BLK_DEV_RAM_COUNT=16
298CONFIG_BLK_DEV_RAM_SIZE=8192 548CONFIG_BLK_DEV_RAM_SIZE=8192
299CONFIG_BLK_DEV_INITRD=y 549CONFIG_BLK_DEV_INITRD=y
300CONFIG_INITRAMFS_SOURCE=""
301# CONFIG_CDROM_PKTCDVD is not set 550# CONFIG_CDROM_PKTCDVD is not set
302 551
303# 552#
@@ -351,6 +600,7 @@ CONFIG_BLK_DEV_CMD64X=y
351CONFIG_BLK_DEV_HPT366=y 600CONFIG_BLK_DEV_HPT366=y
352# CONFIG_BLK_DEV_SC1200 is not set 601# CONFIG_BLK_DEV_SC1200 is not set
353# CONFIG_BLK_DEV_PIIX is not set 602# CONFIG_BLK_DEV_PIIX is not set
603# CONFIG_BLK_DEV_IT821X is not set
354# CONFIG_BLK_DEV_NS87415 is not set 604# CONFIG_BLK_DEV_NS87415 is not set
355# CONFIG_BLK_DEV_PDC202XX_OLD is not set 605# CONFIG_BLK_DEV_PDC202XX_OLD is not set
356CONFIG_BLK_DEV_PDC202XX_NEW=y 606CONFIG_BLK_DEV_PDC202XX_NEW=y
@@ -369,6 +619,7 @@ CONFIG_BLK_DEV_IDEDMA=y
369# 619#
370# SCSI device support 620# SCSI device support
371# 621#
622# CONFIG_RAID_ATTRS is not set
372# CONFIG_SCSI is not set 623# CONFIG_SCSI is not set
373 624
374# 625#
@@ -379,6 +630,7 @@ CONFIG_BLK_DEV_IDEDMA=y
379# 630#
380# Fusion MPT device support 631# Fusion MPT device support
381# 632#
633# CONFIG_FUSION is not set
382 634
383# 635#
384# IEEE 1394 (FireWire) support 636# IEEE 1394 (FireWire) support
@@ -391,235 +643,13 @@ CONFIG_BLK_DEV_IDEDMA=y
391# CONFIG_I2O is not set 643# CONFIG_I2O is not set
392 644
393# 645#
394# Networking support 646# Network device support
395#
396CONFIG_NET=y
397
398#
399# Networking options
400#
401CONFIG_PACKET=m
402CONFIG_PACKET_MMAP=y
403CONFIG_NETLINK_DEV=m
404CONFIG_UNIX=y
405# CONFIG_NET_KEY is not set
406CONFIG_INET=y
407CONFIG_IP_MULTICAST=y
408CONFIG_IP_ADVANCED_ROUTER=y
409CONFIG_IP_MULTIPLE_TABLES=y
410CONFIG_IP_ROUTE_FWMARK=y
411CONFIG_IP_ROUTE_MULTIPATH=y
412# CONFIG_IP_ROUTE_MULTIPATH_CACHED is not set
413CONFIG_IP_ROUTE_VERBOSE=y
414CONFIG_IP_PNP=y
415CONFIG_IP_PNP_DHCP=y
416CONFIG_IP_PNP_BOOTP=y
417# CONFIG_IP_PNP_RARP is not set
418# CONFIG_NET_IPIP is not set
419CONFIG_NET_IPGRE=m
420CONFIG_NET_IPGRE_BROADCAST=y
421CONFIG_IP_MROUTE=y
422CONFIG_IP_PIMSM_V1=y
423CONFIG_IP_PIMSM_V2=y
424# CONFIG_ARPD is not set
425CONFIG_SYN_COOKIES=y
426# CONFIG_INET_AH is not set
427# CONFIG_INET_ESP is not set
428# CONFIG_INET_IPCOMP is not set
429CONFIG_INET_TUNNEL=m
430# CONFIG_IP_TCPDIAG is not set
431# CONFIG_IP_TCPDIAG_IPV6 is not set
432
433#
434# IP: Virtual Server Configuration
435#
436CONFIG_IP_VS=m
437CONFIG_IP_VS_DEBUG=y
438CONFIG_IP_VS_TAB_BITS=12
439
440#
441# IPVS transport protocol load balancing support
442#
443# CONFIG_IP_VS_PROTO_TCP is not set
444# CONFIG_IP_VS_PROTO_UDP is not set
445# CONFIG_IP_VS_PROTO_ESP is not set
446# CONFIG_IP_VS_PROTO_AH is not set
447
448#
449# IPVS scheduler
450#
451CONFIG_IP_VS_RR=m
452CONFIG_IP_VS_WRR=m
453CONFIG_IP_VS_LC=m
454CONFIG_IP_VS_WLC=m
455CONFIG_IP_VS_LBLC=m
456CONFIG_IP_VS_LBLCR=m
457CONFIG_IP_VS_DH=m
458CONFIG_IP_VS_SH=m
459# CONFIG_IP_VS_SED is not set
460# CONFIG_IP_VS_NQ is not set
461
462#
463# IPVS application helper
464#
465# CONFIG_IPV6 is not set
466CONFIG_NETFILTER=y
467# CONFIG_NETFILTER_DEBUG is not set
468CONFIG_BRIDGE_NETFILTER=y
469
470#
471# IP: Netfilter Configuration
472# 647#
473CONFIG_IP_NF_CONNTRACK=m
474# CONFIG_IP_NF_CT_ACCT is not set
475# CONFIG_IP_NF_CONNTRACK_MARK is not set
476# CONFIG_IP_NF_CT_PROTO_SCTP is not set
477CONFIG_IP_NF_FTP=m
478CONFIG_IP_NF_IRC=m
479# CONFIG_IP_NF_TFTP is not set
480# CONFIG_IP_NF_AMANDA is not set
481CONFIG_IP_NF_QUEUE=m
482CONFIG_IP_NF_IPTABLES=m
483CONFIG_IP_NF_MATCH_LIMIT=m
484# CONFIG_IP_NF_MATCH_IPRANGE is not set
485CONFIG_IP_NF_MATCH_MAC=m
486# CONFIG_IP_NF_MATCH_PKTTYPE is not set
487CONFIG_IP_NF_MATCH_MARK=m
488CONFIG_IP_NF_MATCH_MULTIPORT=m
489CONFIG_IP_NF_MATCH_TOS=m
490# CONFIG_IP_NF_MATCH_RECENT is not set
491# CONFIG_IP_NF_MATCH_ECN is not set
492# CONFIG_IP_NF_MATCH_DSCP is not set
493CONFIG_IP_NF_MATCH_AH_ESP=m
494CONFIG_IP_NF_MATCH_LENGTH=m
495CONFIG_IP_NF_MATCH_TTL=m
496CONFIG_IP_NF_MATCH_TCPMSS=m
497# CONFIG_IP_NF_MATCH_HELPER is not set
498CONFIG_IP_NF_MATCH_STATE=m
499# CONFIG_IP_NF_MATCH_CONNTRACK is not set
500CONFIG_IP_NF_MATCH_OWNER=m
501# CONFIG_IP_NF_MATCH_PHYSDEV is not set
502# CONFIG_IP_NF_MATCH_ADDRTYPE is not set
503# CONFIG_IP_NF_MATCH_REALM is not set
504# CONFIG_IP_NF_MATCH_SCTP is not set
505# CONFIG_IP_NF_MATCH_COMMENT is not set
506# CONFIG_IP_NF_MATCH_HASHLIMIT is not set
507CONFIG_IP_NF_FILTER=m
508CONFIG_IP_NF_TARGET_REJECT=m
509CONFIG_IP_NF_TARGET_LOG=m
510CONFIG_IP_NF_TARGET_ULOG=m
511CONFIG_IP_NF_TARGET_TCPMSS=m
512CONFIG_IP_NF_NAT=m
513CONFIG_IP_NF_NAT_NEEDED=y
514CONFIG_IP_NF_TARGET_MASQUERADE=m
515CONFIG_IP_NF_TARGET_REDIRECT=m
516# CONFIG_IP_NF_TARGET_NETMAP is not set
517# CONFIG_IP_NF_TARGET_SAME is not set
518CONFIG_IP_NF_NAT_SNMP_BASIC=m
519CONFIG_IP_NF_NAT_IRC=m
520CONFIG_IP_NF_NAT_FTP=m
521CONFIG_IP_NF_MANGLE=m
522CONFIG_IP_NF_TARGET_TOS=m
523# CONFIG_IP_NF_TARGET_ECN is not set
524# CONFIG_IP_NF_TARGET_DSCP is not set
525CONFIG_IP_NF_TARGET_MARK=m
526# CONFIG_IP_NF_TARGET_CLASSIFY is not set
527# CONFIG_IP_NF_RAW is not set
528CONFIG_IP_NF_ARPTABLES=m
529CONFIG_IP_NF_ARPFILTER=m
530# CONFIG_IP_NF_ARP_MANGLE is not set
531
532#
533# Bridge: Netfilter Configuration
534#
535# CONFIG_BRIDGE_NF_EBTABLES is not set
536CONFIG_XFRM=y
537# CONFIG_XFRM_USER is not set
538
539#
540# SCTP Configuration (EXPERIMENTAL)
541#
542# CONFIG_IP_SCTP is not set
543CONFIG_ATM=y
544CONFIG_ATM_CLIP=y
545# CONFIG_ATM_CLIP_NO_ICMP is not set
546CONFIG_ATM_LANE=m
547CONFIG_ATM_MPOA=m
548CONFIG_ATM_BR2684=m
549# CONFIG_ATM_BR2684_IPFILTER is not set
550CONFIG_BRIDGE=m
551CONFIG_VLAN_8021Q=m
552# CONFIG_DECNET is not set
553CONFIG_LLC=m
554# CONFIG_LLC2 is not set
555CONFIG_IPX=m
556# CONFIG_IPX_INTERN is not set
557CONFIG_ATALK=m
558CONFIG_DEV_APPLETALK=y
559CONFIG_IPDDP=m
560CONFIG_IPDDP_ENCAP=y
561CONFIG_IPDDP_DECAP=y
562CONFIG_X25=m
563CONFIG_LAPB=m
564# CONFIG_NET_DIVERT is not set
565CONFIG_ECONET=m
566CONFIG_ECONET_AUNUDP=y
567CONFIG_ECONET_NATIVE=y
568CONFIG_WAN_ROUTER=m
569
570#
571# QoS and/or fair queueing
572#
573CONFIG_NET_SCHED=y
574CONFIG_NET_SCH_CLK_JIFFIES=y
575# CONFIG_NET_SCH_CLK_GETTIMEOFDAY is not set
576# CONFIG_NET_SCH_CLK_CPU is not set
577CONFIG_NET_SCH_CBQ=m
578CONFIG_NET_SCH_HTB=m
579# CONFIG_NET_SCH_HFSC is not set
580# CONFIG_NET_SCH_ATM is not set
581CONFIG_NET_SCH_PRIO=m
582CONFIG_NET_SCH_RED=m
583CONFIG_NET_SCH_SFQ=m
584CONFIG_NET_SCH_TEQL=m
585CONFIG_NET_SCH_TBF=m
586CONFIG_NET_SCH_GRED=m
587CONFIG_NET_SCH_DSMARK=m
588# CONFIG_NET_SCH_NETEM is not set
589CONFIG_NET_SCH_INGRESS=m
590CONFIG_NET_QOS=y
591CONFIG_NET_ESTIMATOR=y
592CONFIG_NET_CLS=y
593# CONFIG_NET_CLS_BASIC is not set
594CONFIG_NET_CLS_TCINDEX=m
595CONFIG_NET_CLS_ROUTE4=m
596CONFIG_NET_CLS_ROUTE=y
597CONFIG_NET_CLS_FW=m
598CONFIG_NET_CLS_U32=m
599# CONFIG_CLS_U32_PERF is not set
600# CONFIG_NET_CLS_IND is not set
601# CONFIG_CLS_U32_MARK is not set
602CONFIG_NET_CLS_RSVP=m
603CONFIG_NET_CLS_RSVP6=m
604# CONFIG_NET_EMATCH is not set
605# CONFIG_NET_CLS_ACT is not set
606CONFIG_NET_CLS_POLICE=y
607
608#
609# Network testing
610#
611CONFIG_NET_PKTGEN=m
612# CONFIG_NETPOLL is not set
613# CONFIG_NET_POLL_CONTROLLER is not set
614# CONFIG_HAMRADIO is not set
615# CONFIG_IRDA is not set
616# CONFIG_BT is not set
617CONFIG_NETDEVICES=y 648CONFIG_NETDEVICES=y
618CONFIG_DUMMY=y 649CONFIG_DUMMY=y
619# CONFIG_BONDING is not set 650# CONFIG_BONDING is not set
620# CONFIG_EQUALIZER is not set 651# CONFIG_EQUALIZER is not set
621# CONFIG_TUN is not set 652# CONFIG_TUN is not set
622# CONFIG_ETHERTAP is not set
623 653
624# 654#
625# ARCnet devices 655# ARCnet devices
@@ -627,6 +657,11 @@ CONFIG_DUMMY=y
627# CONFIG_ARCNET is not set 657# CONFIG_ARCNET is not set
628 658
629# 659#
660# PHY device support
661#
662# CONFIG_PHYLIB is not set
663
664#
630# Ethernet (10 or 100Mbit) 665# Ethernet (10 or 100Mbit)
631# 666#
632CONFIG_NET_ETHERNET=y 667CONFIG_NET_ETHERNET=y
@@ -635,6 +670,7 @@ CONFIG_MII=y
635# CONFIG_SUNGEM is not set 670# CONFIG_SUNGEM is not set
636# CONFIG_NET_VENDOR_3COM is not set 671# CONFIG_NET_VENDOR_3COM is not set
637# CONFIG_SMC91X is not set 672# CONFIG_SMC91X is not set
673# CONFIG_DM9000 is not set
638 674
639# 675#
640# Tulip family network device support 676# Tulip family network device support
@@ -671,13 +707,17 @@ CONFIG_EEPRO100=y
671# CONFIG_HAMACHI is not set 707# CONFIG_HAMACHI is not set
672# CONFIG_YELLOWFIN is not set 708# CONFIG_YELLOWFIN is not set
673# CONFIG_R8169 is not set 709# CONFIG_R8169 is not set
710# CONFIG_SIS190 is not set
711# CONFIG_SKGE is not set
674# CONFIG_SK98LIN is not set 712# CONFIG_SK98LIN is not set
675# CONFIG_VIA_VELOCITY is not set 713# CONFIG_VIA_VELOCITY is not set
676# CONFIG_TIGON3 is not set 714# CONFIG_TIGON3 is not set
715# CONFIG_BNX2 is not set
677 716
678# 717#
679# Ethernet (10000 Mbit) 718# Ethernet (10000 Mbit)
680# 719#
720# CONFIG_CHELSIO_T1 is not set
681# CONFIG_IXGB is not set 721# CONFIG_IXGB is not set
682# CONFIG_S2IO is not set 722# CONFIG_S2IO is not set
683 723
@@ -702,6 +742,7 @@ CONFIG_NET_RADIO=y
702CONFIG_HERMES=y 742CONFIG_HERMES=y
703# CONFIG_PLX_HERMES is not set 743# CONFIG_PLX_HERMES is not set
704# CONFIG_TMD_HERMES is not set 744# CONFIG_TMD_HERMES is not set
745# CONFIG_NORTEL_HERMES is not set
705CONFIG_PCI_HERMES=y 746CONFIG_PCI_HERMES=y
706# CONFIG_ATMEL is not set 747# CONFIG_ATMEL is not set
707 748
@@ -709,6 +750,7 @@ CONFIG_PCI_HERMES=y
709# Prism GT/Duette 802.11(a/b/g) PCI/Cardbus support 750# Prism GT/Duette 802.11(a/b/g) PCI/Cardbus support
710# 751#
711# CONFIG_PRISM54 is not set 752# CONFIG_PRISM54 is not set
753# CONFIG_HOSTAP is not set
712CONFIG_NET_WIRELESS=y 754CONFIG_NET_WIRELESS=y
713 755
714# 756#
@@ -758,6 +800,8 @@ CONFIG_ATM_TCP=m
758# CONFIG_SLIP is not set 800# CONFIG_SLIP is not set
759# CONFIG_SHAPER is not set 801# CONFIG_SHAPER is not set
760# CONFIG_NETCONSOLE is not set 802# CONFIG_NETCONSOLE is not set
803# CONFIG_NETPOLL is not set
804# CONFIG_NET_POLL_CONTROLLER is not set
761 805
762# 806#
763# ISDN subsystem 807# ISDN subsystem
@@ -795,7 +839,6 @@ CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
795# 839#
796# CONFIG_SERIO is not set 840# CONFIG_SERIO is not set
797# CONFIG_GAMEPORT is not set 841# CONFIG_GAMEPORT is not set
798CONFIG_SOUND_GAMEPORT=y
799 842
800# 843#
801# Character devices 844# Character devices
@@ -816,6 +859,7 @@ CONFIG_SERIAL_8250_NR_UARTS=2
816# 859#
817CONFIG_SERIAL_CORE=y 860CONFIG_SERIAL_CORE=y
818CONFIG_SERIAL_CORE_CONSOLE=y 861CONFIG_SERIAL_CORE_CONSOLE=y
862# CONFIG_SERIAL_JSM is not set
819CONFIG_UNIX98_PTYS=y 863CONFIG_UNIX98_PTYS=y
820CONFIG_LEGACY_PTYS=y 864CONFIG_LEGACY_PTYS=y
821CONFIG_LEGACY_PTY_COUNT=256 865CONFIG_LEGACY_PTY_COUNT=256
@@ -882,12 +926,11 @@ CONFIG_I2C_ALGOBIT=y
882# CONFIG_I2C_AMD8111 is not set 926# CONFIG_I2C_AMD8111 is not set
883# CONFIG_I2C_I801 is not set 927# CONFIG_I2C_I801 is not set
884# CONFIG_I2C_I810 is not set 928# CONFIG_I2C_I810 is not set
929# CONFIG_I2C_PIIX4 is not set
885# CONFIG_I2C_IOP3XX is not set 930# CONFIG_I2C_IOP3XX is not set
886# CONFIG_I2C_ISA is not set
887CONFIG_I2C_IXP4XX=y 931CONFIG_I2C_IXP4XX=y
888# CONFIG_I2C_NFORCE2 is not set 932# CONFIG_I2C_NFORCE2 is not set
889# CONFIG_I2C_PARPORT_LIGHT is not set 933# CONFIG_I2C_PARPORT_LIGHT is not set
890# CONFIG_I2C_PIIX4 is not set
891# CONFIG_I2C_PROSAVAGE is not set 934# CONFIG_I2C_PROSAVAGE is not set
892# CONFIG_I2C_SAVAGE4 is not set 935# CONFIG_I2C_SAVAGE4 is not set
893# CONFIG_SCx200_ACB is not set 936# CONFIG_SCx200_ACB is not set
@@ -901,14 +944,33 @@ CONFIG_I2C_IXP4XX=y
901# CONFIG_I2C_PCA_ISA is not set 944# CONFIG_I2C_PCA_ISA is not set
902 945
903# 946#
904# Hardware Sensors Chip support 947# Miscellaneous I2C Chip support
905# 948#
906CONFIG_I2C_SENSOR=y 949# CONFIG_SENSORS_DS1337 is not set
950# CONFIG_SENSORS_DS1374 is not set
951CONFIG_SENSORS_EEPROM=y
952# CONFIG_SENSORS_PCF8574 is not set
953# CONFIG_SENSORS_PCA9539 is not set
954# CONFIG_SENSORS_PCF8591 is not set
955# CONFIG_SENSORS_RTC8564 is not set
956# CONFIG_SENSORS_MAX6875 is not set
957# CONFIG_I2C_DEBUG_CORE is not set
958# CONFIG_I2C_DEBUG_ALGO is not set
959# CONFIG_I2C_DEBUG_BUS is not set
960# CONFIG_I2C_DEBUG_CHIP is not set
961
962#
963# Hardware Monitoring support
964#
965CONFIG_HWMON=y
966# CONFIG_HWMON_VID is not set
907# CONFIG_SENSORS_ADM1021 is not set 967# CONFIG_SENSORS_ADM1021 is not set
908# CONFIG_SENSORS_ADM1025 is not set 968# CONFIG_SENSORS_ADM1025 is not set
909# CONFIG_SENSORS_ADM1026 is not set 969# CONFIG_SENSORS_ADM1026 is not set
910# CONFIG_SENSORS_ADM1031 is not set 970# CONFIG_SENSORS_ADM1031 is not set
971# CONFIG_SENSORS_ADM9240 is not set
911# CONFIG_SENSORS_ASB100 is not set 972# CONFIG_SENSORS_ASB100 is not set
973# CONFIG_SENSORS_ATXP1 is not set
912# CONFIG_SENSORS_DS1621 is not set 974# CONFIG_SENSORS_DS1621 is not set
913# CONFIG_SENSORS_FSCHER is not set 975# CONFIG_SENSORS_FSCHER is not set
914# CONFIG_SENSORS_FSCPOS is not set 976# CONFIG_SENSORS_FSCPOS is not set
@@ -924,30 +986,26 @@ CONFIG_I2C_SENSOR=y
924# CONFIG_SENSORS_LM85 is not set 986# CONFIG_SENSORS_LM85 is not set
925# CONFIG_SENSORS_LM87 is not set 987# CONFIG_SENSORS_LM87 is not set
926# CONFIG_SENSORS_LM90 is not set 988# CONFIG_SENSORS_LM90 is not set
989# CONFIG_SENSORS_LM92 is not set
927# CONFIG_SENSORS_MAX1619 is not set 990# CONFIG_SENSORS_MAX1619 is not set
928# CONFIG_SENSORS_PC87360 is not set 991# CONFIG_SENSORS_PC87360 is not set
929# CONFIG_SENSORS_SMSC47B397 is not set
930# CONFIG_SENSORS_SIS5595 is not set 992# CONFIG_SENSORS_SIS5595 is not set
931# CONFIG_SENSORS_SMSC47M1 is not set 993# CONFIG_SENSORS_SMSC47M1 is not set
994# CONFIG_SENSORS_SMSC47B397 is not set
932# CONFIG_SENSORS_VIA686A is not set 995# CONFIG_SENSORS_VIA686A is not set
933# CONFIG_SENSORS_W83781D is not set 996# CONFIG_SENSORS_W83781D is not set
997# CONFIG_SENSORS_W83792D is not set
934# CONFIG_SENSORS_W83L785TS is not set 998# CONFIG_SENSORS_W83L785TS is not set
935# CONFIG_SENSORS_W83627HF is not set 999# CONFIG_SENSORS_W83627HF is not set
1000# CONFIG_SENSORS_W83627EHF is not set
1001# CONFIG_HWMON_DEBUG_CHIP is not set
936 1002
937# 1003#
938# Other I2C Chip support 1004# Misc devices
939# 1005#
940CONFIG_SENSORS_EEPROM=y
941# CONFIG_SENSORS_PCF8574 is not set
942# CONFIG_SENSORS_PCF8591 is not set
943# CONFIG_SENSORS_RTC8564 is not set
944# CONFIG_I2C_DEBUG_CORE is not set
945# CONFIG_I2C_DEBUG_ALGO is not set
946# CONFIG_I2C_DEBUG_BUS is not set
947# CONFIG_I2C_DEBUG_CHIP is not set
948 1006
949# 1007#
950# Misc devices 1008# Multimedia Capabilities Port drivers
951# 1009#
952 1010
953# 1011#
@@ -994,6 +1052,7 @@ CONFIG_EXT2_FS=y
994CONFIG_EXT2_FS_XATTR=y 1052CONFIG_EXT2_FS_XATTR=y
995CONFIG_EXT2_FS_POSIX_ACL=y 1053CONFIG_EXT2_FS_POSIX_ACL=y
996# CONFIG_EXT2_FS_SECURITY is not set 1054# CONFIG_EXT2_FS_SECURITY is not set
1055# CONFIG_EXT2_FS_XIP is not set
997CONFIG_EXT3_FS=y 1056CONFIG_EXT3_FS=y
998CONFIG_EXT3_FS_XATTR=y 1057CONFIG_EXT3_FS_XATTR=y
999CONFIG_EXT3_FS_POSIX_ACL=y 1058CONFIG_EXT3_FS_POSIX_ACL=y
@@ -1004,17 +1063,15 @@ CONFIG_FS_MBCACHE=y
1004# CONFIG_REISERFS_FS is not set 1063# CONFIG_REISERFS_FS is not set
1005# CONFIG_JFS_FS is not set 1064# CONFIG_JFS_FS is not set
1006CONFIG_FS_POSIX_ACL=y 1065CONFIG_FS_POSIX_ACL=y
1007
1008#
1009# XFS support
1010#
1011# CONFIG_XFS_FS is not set 1066# CONFIG_XFS_FS is not set
1012# CONFIG_MINIX_FS is not set 1067# CONFIG_MINIX_FS is not set
1013# CONFIG_ROMFS_FS is not set 1068# CONFIG_ROMFS_FS is not set
1069CONFIG_INOTIFY=y
1014# CONFIG_QUOTA is not set 1070# CONFIG_QUOTA is not set
1015CONFIG_DNOTIFY=y 1071CONFIG_DNOTIFY=y
1016# CONFIG_AUTOFS_FS is not set 1072# CONFIG_AUTOFS_FS is not set
1017# CONFIG_AUTOFS4_FS is not set 1073# CONFIG_AUTOFS4_FS is not set
1074# CONFIG_FUSE_FS is not set
1018 1075
1019# 1076#
1020# CD-ROM/DVD Filesystems 1077# CD-ROM/DVD Filesystems
@@ -1034,12 +1091,10 @@ CONFIG_DNOTIFY=y
1034# 1091#
1035CONFIG_PROC_FS=y 1092CONFIG_PROC_FS=y
1036CONFIG_SYSFS=y 1093CONFIG_SYSFS=y
1037# CONFIG_DEVFS_FS is not set
1038# CONFIG_DEVPTS_FS_XATTR is not set
1039CONFIG_TMPFS=y 1094CONFIG_TMPFS=y
1040# CONFIG_TMPFS_XATTR is not set
1041# CONFIG_HUGETLB_PAGE is not set 1095# CONFIG_HUGETLB_PAGE is not set
1042CONFIG_RAMFS=y 1096CONFIG_RAMFS=y
1097# CONFIG_RELAYFS_FS is not set
1043 1098
1044# 1099#
1045# Miscellaneous filesystems 1100# Miscellaneous filesystems
@@ -1054,8 +1109,7 @@ CONFIG_RAMFS=y
1054# CONFIG_JFFS_FS is not set 1109# CONFIG_JFFS_FS is not set
1055CONFIG_JFFS2_FS=y 1110CONFIG_JFFS2_FS=y
1056CONFIG_JFFS2_FS_DEBUG=0 1111CONFIG_JFFS2_FS_DEBUG=0
1057# CONFIG_JFFS2_FS_NAND is not set 1112CONFIG_JFFS2_FS_WRITEBUFFER=y
1058# CONFIG_JFFS2_FS_NOR_ECC is not set
1059# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set 1113# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set
1060CONFIG_JFFS2_ZLIB=y 1114CONFIG_JFFS2_ZLIB=y
1061CONFIG_JFFS2_RTIME=y 1115CONFIG_JFFS2_RTIME=y
@@ -1072,12 +1126,14 @@ CONFIG_JFFS2_RTIME=y
1072# 1126#
1073CONFIG_NFS_FS=y 1127CONFIG_NFS_FS=y
1074CONFIG_NFS_V3=y 1128CONFIG_NFS_V3=y
1129# CONFIG_NFS_V3_ACL is not set
1075# CONFIG_NFS_V4 is not set 1130# CONFIG_NFS_V4 is not set
1076# CONFIG_NFS_DIRECTIO is not set 1131# CONFIG_NFS_DIRECTIO is not set
1077# CONFIG_NFSD is not set 1132# CONFIG_NFSD is not set
1078CONFIG_ROOT_NFS=y 1133CONFIG_ROOT_NFS=y
1079CONFIG_LOCKD=y 1134CONFIG_LOCKD=y
1080CONFIG_LOCKD_V4=y 1135CONFIG_LOCKD_V4=y
1136CONFIG_NFS_COMMON=y
1081CONFIG_SUNRPC=y 1137CONFIG_SUNRPC=y
1082# CONFIG_RPCSEC_GSS_KRB5 is not set 1138# CONFIG_RPCSEC_GSS_KRB5 is not set
1083# CONFIG_RPCSEC_GSS_SPKM3 is not set 1139# CONFIG_RPCSEC_GSS_SPKM3 is not set
@@ -1086,6 +1142,7 @@ CONFIG_SUNRPC=y
1086# CONFIG_NCP_FS is not set 1142# CONFIG_NCP_FS is not set
1087# CONFIG_CODA_FS is not set 1143# CONFIG_CODA_FS is not set
1088# CONFIG_AFS_FS is not set 1144# CONFIG_AFS_FS is not set
1145# CONFIG_9P_FS is not set
1089 1146
1090# 1147#
1091# Partition Types 1148# Partition Types
@@ -1124,6 +1181,7 @@ CONFIG_MSDOS_PARTITION=y
1124CONFIG_DEBUG_KERNEL=y 1181CONFIG_DEBUG_KERNEL=y
1125CONFIG_MAGIC_SYSRQ=y 1182CONFIG_MAGIC_SYSRQ=y
1126CONFIG_LOG_BUF_SHIFT=14 1183CONFIG_LOG_BUF_SHIFT=14
1184CONFIG_DETECT_SOFTLOCKUP=y
1127# CONFIG_SCHEDSTATS is not set 1185# CONFIG_SCHEDSTATS is not set
1128# CONFIG_DEBUG_SLAB is not set 1186# CONFIG_DEBUG_SLAB is not set
1129# CONFIG_DEBUG_SPINLOCK is not set 1187# CONFIG_DEBUG_SPINLOCK is not set
@@ -1158,6 +1216,7 @@ CONFIG_DEBUG_LL=y
1158# Library routines 1216# Library routines
1159# 1217#
1160# CONFIG_CRC_CCITT is not set 1218# CONFIG_CRC_CCITT is not set
1219# CONFIG_CRC16 is not set
1161CONFIG_CRC32=y 1220CONFIG_CRC32=y
1162# CONFIG_LIBCRC32C is not set 1221# CONFIG_LIBCRC32C is not set
1163CONFIG_ZLIB_INFLATE=y 1222CONFIG_ZLIB_INFLATE=y
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 7152bfbee581..93b5e8e5292e 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -537,7 +537,7 @@ ENTRY(__switch_to)
537#ifdef CONFIG_CPU_MPCORE 537#ifdef CONFIG_CPU_MPCORE
538 clrex 538 clrex
539#else 539#else
540 strex r3, r4, [ip] @ Clear exclusive monitor 540 strex r5, r4, [ip] @ Clear exclusive monitor
541#endif 541#endif
542#endif 542#endif
543#if defined(CONFIG_CPU_XSCALE) && !defined(CONFIG_IWMMXT) 543#if defined(CONFIG_CPU_XSCALE) && !defined(CONFIG_IWMMXT)
diff --git a/arch/arm/kernel/io.c b/arch/arm/kernel/io.c
index 6c20c1188b60..1f6822dfae74 100644
--- a/arch/arm/kernel/io.c
+++ b/arch/arm/kernel/io.c
@@ -7,7 +7,7 @@
7 * Copy data from IO memory space to "real" memory space. 7 * Copy data from IO memory space to "real" memory space.
8 * This needs to be optimized. 8 * This needs to be optimized.
9 */ 9 */
10void _memcpy_fromio(void *to, void __iomem *from, size_t count) 10void _memcpy_fromio(void *to, const volatile void __iomem *from, size_t count)
11{ 11{
12 unsigned char *t = to; 12 unsigned char *t = to;
13 while (count) { 13 while (count) {
@@ -22,7 +22,7 @@ void _memcpy_fromio(void *to, void __iomem *from, size_t count)
22 * Copy data from "real" memory space to IO memory space. 22 * Copy data from "real" memory space to IO memory space.
23 * This needs to be optimized. 23 * This needs to be optimized.
24 */ 24 */
25void _memcpy_toio(void __iomem *to, const void *from, size_t count) 25void _memcpy_toio(volatile void __iomem *to, const void *from, size_t count)
26{ 26{
27 const unsigned char *f = from; 27 const unsigned char *f = from;
28 while (count) { 28 while (count) {
@@ -37,7 +37,7 @@ void _memcpy_toio(void __iomem *to, const void *from, size_t count)
37 * "memset" on IO memory space. 37 * "memset" on IO memory space.
38 * This needs to be optimized. 38 * This needs to be optimized.
39 */ 39 */
40void _memset_io(void __iomem *dst, int c, size_t count) 40void _memset_io(volatile void __iomem *dst, int c, size_t count)
41{ 41{
42 while (count) { 42 while (count) {
43 count--; 43 count--;
diff --git a/arch/arm/mach-clps711x/fortunet.c b/arch/arm/mach-clps711x/fortunet.c
index f83a59761e02..3d88da0c287b 100644
--- a/arch/arm/mach-clps711x/fortunet.c
+++ b/arch/arm/mach-clps711x/fortunet.c
@@ -31,6 +31,8 @@
31 31
32#include <asm/mach/arch.h> 32#include <asm/mach/arch.h>
33 33
34#include <asm/memory.h>
35
34#include "common.h" 36#include "common.h"
35 37
36struct meminfo memmap = { 38struct meminfo memmap = {
diff --git a/arch/arm/mach-imx/leds-mx1ads.c b/arch/arm/mach-imx/leds-mx1ads.c
index e6399b06e4a4..79236404aec2 100644
--- a/arch/arm/mach-imx/leds-mx1ads.c
+++ b/arch/arm/mach-imx/leds-mx1ads.c
@@ -17,7 +17,6 @@
17#include <asm/system.h> 17#include <asm/system.h>
18#include <asm/io.h> 18#include <asm/io.h>
19#include <asm/leds.h> 19#include <asm/leds.h>
20#include <asm/mach-types.h>
21#include "leds.h" 20#include "leds.h"
22 21
23/* 22/*
diff --git a/arch/arm/mach-iop3xx/common.c b/arch/arm/mach-iop3xx/common.c
index bda7394ec06c..fdeeef489a73 100644
--- a/arch/arm/mach-iop3xx/common.c
+++ b/arch/arm/mach-iop3xx/common.c
@@ -27,7 +27,6 @@ unsigned long iop3xx_pcibios_min_mem = 0;
27/* 27/*
28 * Default power-off for EP80219 28 * Default power-off for EP80219
29 */ 29 */
30#include <asm/mach-types.h>
31 30
32static inline void ep80219_send_to_pic(__u8 c) { 31static inline void ep80219_send_to_pic(__u8 c) {
33} 32}
diff --git a/arch/arm/mach-iop3xx/iop321-time.c b/arch/arm/mach-iop3xx/iop321-time.c
index 0039793b694a..d67ac0e5d438 100644
--- a/arch/arm/mach-iop3xx/iop321-time.c
+++ b/arch/arm/mach-iop3xx/iop321-time.c
@@ -23,7 +23,6 @@
23#include <asm/io.h> 23#include <asm/io.h>
24#include <asm/irq.h> 24#include <asm/irq.h>
25#include <asm/uaccess.h> 25#include <asm/uaccess.h>
26#include <asm/mach-types.h>
27#include <asm/mach/irq.h> 26#include <asm/mach/irq.h>
28#include <asm/mach/time.h> 27#include <asm/mach/time.h>
29 28
diff --git a/arch/arm/mach-iop3xx/iop331-time.c b/arch/arm/mach-iop3xx/iop331-time.c
index 8eddfac7e2b0..3c1f0ebbd636 100644
--- a/arch/arm/mach-iop3xx/iop331-time.c
+++ b/arch/arm/mach-iop3xx/iop331-time.c
@@ -23,7 +23,6 @@
23#include <asm/io.h> 23#include <asm/io.h>
24#include <asm/irq.h> 24#include <asm/irq.h>
25#include <asm/uaccess.h> 25#include <asm/uaccess.h>
26#include <asm/mach-types.h>
27#include <asm/mach/irq.h> 26#include <asm/mach/irq.h>
28#include <asm/mach/time.h> 27#include <asm/mach/time.h>
29 28
diff --git a/arch/arm/mach-iop3xx/iq31244-mm.c b/arch/arm/mach-iop3xx/iq31244-mm.c
index b01042f7de71..55992ab586ba 100644
--- a/arch/arm/mach-iop3xx/iq31244-mm.c
+++ b/arch/arm/mach-iop3xx/iq31244-mm.c
@@ -21,7 +21,6 @@
21#include <asm/page.h> 21#include <asm/page.h>
22 22
23#include <asm/mach/map.h> 23#include <asm/mach/map.h>
24#include <asm/mach-types.h>
25 24
26 25
27/* 26/*
diff --git a/arch/arm/mach-iop3xx/iq80321-mm.c b/arch/arm/mach-iop3xx/iq80321-mm.c
index 1580c7ed2b9d..bb3e9e5a9aff 100644
--- a/arch/arm/mach-iop3xx/iq80321-mm.c
+++ b/arch/arm/mach-iop3xx/iq80321-mm.c
@@ -21,7 +21,6 @@
21#include <asm/page.h> 21#include <asm/page.h>
22 22
23#include <asm/mach/map.h> 23#include <asm/mach/map.h>
24#include <asm/mach-types.h>
25 24
26 25
27/* 26/*
diff --git a/arch/arm/mach-iop3xx/iq80331-mm.c b/arch/arm/mach-iop3xx/iq80331-mm.c
index ee8c333e115f..129eb49b0670 100644
--- a/arch/arm/mach-iop3xx/iq80331-mm.c
+++ b/arch/arm/mach-iop3xx/iq80331-mm.c
@@ -21,7 +21,6 @@
21#include <asm/page.h> 21#include <asm/page.h>
22 22
23#include <asm/mach/map.h> 23#include <asm/mach/map.h>
24#include <asm/mach-types.h>
25 24
26 25
27/* 26/*
diff --git a/arch/arm/mach-iop3xx/iq80332-mm.c b/arch/arm/mach-iop3xx/iq80332-mm.c
index 084afcdfb1eb..2feaf7591f53 100644
--- a/arch/arm/mach-iop3xx/iq80332-mm.c
+++ b/arch/arm/mach-iop3xx/iq80332-mm.c
@@ -21,7 +21,6 @@
21#include <asm/page.h> 21#include <asm/page.h>
22 22
23#include <asm/mach/map.h> 23#include <asm/mach/map.h>
24#include <asm/mach-types.h>
25 24
26 25
27/* 26/*
diff --git a/arch/arm/mach-ixp2000/core.c b/arch/arm/mach-ixp2000/core.c
index 74bd2fd602d4..f4d7f1f6ef85 100644
--- a/arch/arm/mach-ixp2000/core.c
+++ b/arch/arm/mach-ixp2000/core.c
@@ -30,7 +30,6 @@
30#include <asm/setup.h> 30#include <asm/setup.h>
31#include <asm/memory.h> 31#include <asm/memory.h>
32#include <asm/hardware.h> 32#include <asm/hardware.h>
33#include <asm/mach-types.h>
34#include <asm/irq.h> 33#include <asm/irq.h>
35#include <asm/system.h> 34#include <asm/system.h>
36#include <asm/tlbflush.h> 35#include <asm/tlbflush.h>
@@ -168,7 +167,7 @@ static struct plat_serial8250_port ixp2000_serial_port[] = {
168 167
169static struct resource ixp2000_uart_resource = { 168static struct resource ixp2000_uart_resource = {
170 .start = IXP2000_UART_PHYS_BASE, 169 .start = IXP2000_UART_PHYS_BASE,
171 .end = IXP2000_UART_PHYS_BASE + 0xffff, 170 .end = IXP2000_UART_PHYS_BASE + 0x1f,
172 .flags = IORESOURCE_MEM, 171 .flags = IORESOURCE_MEM,
173}; 172};
174 173
diff --git a/arch/arm/mach-ixp2000/pci.c b/arch/arm/mach-ixp2000/pci.c
index 0788fb2b5c10..522205acb316 100644
--- a/arch/arm/mach-ixp2000/pci.c
+++ b/arch/arm/mach-ixp2000/pci.c
@@ -28,7 +28,6 @@
28#include <asm/io.h> 28#include <asm/io.h>
29#include <asm/irq.h> 29#include <asm/irq.h>
30#include <asm/system.h> 30#include <asm/system.h>
31#include <asm/mach-types.h>
32#include <asm/hardware.h> 31#include <asm/hardware.h>
33 32
34#include <asm/mach/pci.h> 33#include <asm/mach/pci.h>
diff --git a/arch/arm/mach-ixp4xx/common.c b/arch/arm/mach-ixp4xx/common.c
index 52ad11328e96..36b6045213ee 100644
--- a/arch/arm/mach-ixp4xx/common.c
+++ b/arch/arm/mach-ixp4xx/common.c
@@ -125,7 +125,8 @@ static int ixp4xx_set_irq_type(unsigned int irq, unsigned int type)
125 } else if (type & IRQT_LOW) { 125 } else if (type & IRQT_LOW) {
126 int_style = IXP4XX_GPIO_STYLE_ACTIVE_LOW; 126 int_style = IXP4XX_GPIO_STYLE_ACTIVE_LOW;
127 irq_type = IXP4XX_IRQ_LEVEL; 127 irq_type = IXP4XX_IRQ_LEVEL;
128 } 128 } else
129 return -EINVAL;
129 130
130 ixp4xx_config_irq(irq, irq_type); 131 ixp4xx_config_irq(irq, irq_type);
131 132
@@ -142,6 +143,8 @@ static int ixp4xx_set_irq_type(unsigned int irq, unsigned int type)
142 143
143 /* Set the new style */ 144 /* Set the new style */
144 *int_reg |= (int_style << (line * IXP4XX_GPIO_STYLE_SIZE)); 145 *int_reg |= (int_style << (line * IXP4XX_GPIO_STYLE_SIZE));
146
147 return 0;
145} 148}
146 149
147static void ixp4xx_irq_mask(unsigned int irq) 150static void ixp4xx_irq_mask(unsigned int irq)
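[Hedged aside, not part of the patch: the common.c hunk above changes ixp4xx_set_irq_type so that an unsupported trigger type is rejected with -EINVAL before any hardware is touched, and success is reported explicitly with return 0 once the GPIO style register has been programmed. The same validate-then-configure shape in plain C, with purely hypothetical names:]

#include <errno.h>

enum example_trigger { EX_LEVEL_HIGH, EX_LEVEL_LOW, EX_EDGE_RISING, EX_EDGE_FALLING };

static int example_set_irq_type(unsigned int irq, int type)
{
	int style;

	switch (type) {
	case EX_LEVEL_HIGH:   style = 0; break;
	case EX_LEVEL_LOW:    style = 1; break;
	case EX_EDGE_RISING:  style = 2; break;
	case EX_EDGE_FALLING: style = 3; break;
	default:
		return -EINVAL;   /* unsupported trigger: fail before touching hardware */
	}

	/* ... program the per-line interrupt style register with 'style' here ... */
	(void)irq;
	(void)style;

	return 0;                 /* success is now reported explicitly */
}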
diff --git a/arch/arm/mach-ixp4xx/ixdp425-setup.c b/arch/arm/mach-ixp4xx/ixdp425-setup.c
index 39b06ed80646..0a41080d2266 100644
--- a/arch/arm/mach-ixp4xx/ixdp425-setup.c
+++ b/arch/arm/mach-ixp4xx/ixdp425-setup.c
@@ -123,7 +123,7 @@ static void __init ixdp425_init(void)
123 platform_add_devices(ixdp425_devices, ARRAY_SIZE(ixdp425_devices)); 123 platform_add_devices(ixdp425_devices, ARRAY_SIZE(ixdp425_devices));
124} 124}
125 125
126#ifdef CONFIG_ARCH_IXDP465 126#ifdef CONFIG_ARCH_IXDP425
127MACHINE_START(IXDP425, "Intel IXDP425 Development Platform") 127MACHINE_START(IXDP425, "Intel IXDP425 Development Platform")
128 /* Maintainer: MontaVista Software, Inc. */ 128 /* Maintainer: MontaVista Software, Inc. */
129 .phys_ram = PHYS_OFFSET, 129 .phys_ram = PHYS_OFFSET,
diff --git a/arch/arm/mach-l7200/core.c b/arch/arm/mach-l7200/core.c
index 2a7fee2a7635..5fd8c9f97f9a 100644
--- a/arch/arm/mach-l7200/core.c
+++ b/arch/arm/mach-l7200/core.c
@@ -12,7 +12,6 @@
12#include <asm/page.h> 12#include <asm/page.h>
13 13
14#include <asm/mach/map.h> 14#include <asm/mach/map.h>
15#include <asm/arch/hardware.h>
16 15
17/* 16/*
18 * IRQ base register 17 * IRQ base register
diff --git a/arch/arm/mach-pxa/corgi_lcd.c b/arch/arm/mach-pxa/corgi_lcd.c
index c5efcd04fcbc..c02ef7c0f7ef 100644
--- a/arch/arm/mach-pxa/corgi_lcd.c
+++ b/arch/arm/mach-pxa/corgi_lcd.c
@@ -19,7 +19,6 @@
19#include <linux/kernel.h> 19#include <linux/kernel.h>
20#include <linux/device.h> 20#include <linux/device.h>
21#include <linux/module.h> 21#include <linux/module.h>
22#include <asm/mach-types.h>
23#include <asm/arch/akita.h> 22#include <asm/arch/akita.h>
24#include <asm/arch/corgi.h> 23#include <asm/arch/corgi.h>
25#include <asm/arch/hardware.h> 24#include <asm/arch/hardware.h>
diff --git a/arch/arm/mach-pxa/generic.c b/arch/arm/mach-pxa/generic.c
index a45aaa115a76..d0660a8c4b70 100644
--- a/arch/arm/mach-pxa/generic.c
+++ b/arch/arm/mach-pxa/generic.c
@@ -34,6 +34,7 @@
34#include <asm/arch/udc.h> 34#include <asm/arch/udc.h>
35#include <asm/arch/pxafb.h> 35#include <asm/arch/pxafb.h>
36#include <asm/arch/mmc.h> 36#include <asm/arch/mmc.h>
37#include <asm/arch/i2c.h>
37 38
38#include "generic.h" 39#include "generic.h"
39 40
diff --git a/arch/arm/mach-s3c2410/usb-simtec.c b/arch/arm/mach-s3c2410/usb-simtec.c
index f021fd82be52..5098b50158a3 100644
--- a/arch/arm/mach-s3c2410/usb-simtec.c
+++ b/arch/arm/mach-s3c2410/usb-simtec.c
@@ -40,7 +40,6 @@
40#include <asm/hardware.h> 40#include <asm/hardware.h>
41#include <asm/io.h> 41#include <asm/io.h>
42#include <asm/irq.h> 42#include <asm/irq.h>
43#include <asm/mach-types.h>
44 43
45#include "devs.h" 44#include "devs.h"
46#include "usb-simtec.h" 45#include "usb-simtec.h"
diff --git a/arch/arm/mach-versatile/core.c b/arch/arm/mach-versatile/core.c
index 3c8862fde51a..a30e0451df72 100644
--- a/arch/arm/mach-versatile/core.c
+++ b/arch/arm/mach-versatile/core.c
@@ -30,7 +30,6 @@
30#include <asm/io.h> 30#include <asm/io.h>
31#include <asm/irq.h> 31#include <asm/irq.h>
32#include <asm/leds.h> 32#include <asm/leds.h>
33#include <asm/mach-types.h>
34#include <asm/hardware/amba.h> 33#include <asm/hardware/amba.h>
35#include <asm/hardware/amba_clcd.h> 34#include <asm/hardware/amba_clcd.h>
36#include <asm/hardware/arm_timer.h> 35#include <asm/hardware/arm_timer.h>
@@ -52,8 +51,9 @@
52 * 51 *
53 * Setup a VA for the Versatile Vectored Interrupt Controller. 52 * Setup a VA for the Versatile Vectored Interrupt Controller.
54 */ 53 */
55#define VA_VIC_BASE IO_ADDRESS(VERSATILE_VIC_BASE) 54#define __io_address(n) __io(IO_ADDRESS(n))
56#define VA_SIC_BASE IO_ADDRESS(VERSATILE_SIC_BASE) 55#define VA_VIC_BASE __io_address(VERSATILE_VIC_BASE)
56#define VA_SIC_BASE __io_address(VERSATILE_SIC_BASE)
57 57
58static void vic_mask_irq(unsigned int irq) 58static void vic_mask_irq(unsigned int irq)
59{ 59{
@@ -214,7 +214,7 @@ void __init versatile_map_io(void)
214 iotable_init(versatile_io_desc, ARRAY_SIZE(versatile_io_desc)); 214 iotable_init(versatile_io_desc, ARRAY_SIZE(versatile_io_desc));
215} 215}
216 216
217#define VERSATILE_REFCOUNTER (IO_ADDRESS(VERSATILE_SYS_BASE) + VERSATILE_SYS_24MHz_OFFSET) 217#define VERSATILE_REFCOUNTER (__io_address(VERSATILE_SYS_BASE) + VERSATILE_SYS_24MHz_OFFSET)
218 218
219/* 219/*
220 * This is the Versatile sched_clock implementation. This has 220 * This is the Versatile sched_clock implementation. This has
@@ -231,7 +231,7 @@ unsigned long long sched_clock(void)
231} 231}
232 232
233 233
234#define VERSATILE_FLASHCTRL (IO_ADDRESS(VERSATILE_SYS_BASE) + VERSATILE_SYS_FLASH_OFFSET) 234#define VERSATILE_FLASHCTRL (__io_address(VERSATILE_SYS_BASE) + VERSATILE_SYS_FLASH_OFFSET)
235 235
236static int versatile_flash_init(void) 236static int versatile_flash_init(void)
237{ 237{
@@ -309,7 +309,7 @@ static struct platform_device smc91x_device = {
309 .resource = smc91x_resources, 309 .resource = smc91x_resources,
310}; 310};
311 311
312#define VERSATILE_SYSMCI (IO_ADDRESS(VERSATILE_SYS_BASE) + VERSATILE_SYS_MCI_OFFSET) 312#define VERSATILE_SYSMCI (__io_address(VERSATILE_SYS_BASE) + VERSATILE_SYS_MCI_OFFSET)
313 313
314unsigned int mmc_status(struct device *dev) 314unsigned int mmc_status(struct device *dev)
315{ 315{
@@ -343,11 +343,11 @@ static const struct icst307_params versatile_oscvco_params = {
343 343
344static void versatile_oscvco_set(struct clk *clk, struct icst307_vco vco) 344static void versatile_oscvco_set(struct clk *clk, struct icst307_vco vco)
345{ 345{
346 unsigned long sys_lock = IO_ADDRESS(VERSATILE_SYS_BASE) + VERSATILE_SYS_LOCK_OFFSET; 346 void __iomem *sys_lock = __io_address(VERSATILE_SYS_BASE) + VERSATILE_SYS_LOCK_OFFSET;
347#if defined(CONFIG_ARCH_VERSATILE_PB) 347#if defined(CONFIG_ARCH_VERSATILE_PB)
348 unsigned long sys_osc = IO_ADDRESS(VERSATILE_SYS_BASE) + VERSATILE_SYS_OSC4_OFFSET; 348 void __iomem *sys_osc = __io_address(VERSATILE_SYS_BASE) + VERSATILE_SYS_OSC4_OFFSET;
349#elif defined(CONFIG_MACH_VERSATILE_AB) 349#elif defined(CONFIG_MACH_VERSATILE_AB)
350 unsigned long sys_osc = IO_ADDRESS(VERSATILE_SYS_BASE) + VERSATILE_SYS_OSC1_OFFSET; 350 void __iomem *sys_osc = __io_address(VERSATILE_SYS_BASE) + VERSATILE_SYS_OSC1_OFFSET;
351#endif 351#endif
352 u32 val; 352 u32 val;
353 353
@@ -483,7 +483,7 @@ static struct clcd_panel epson_2_2_in = {
483 */ 483 */
484static struct clcd_panel *versatile_clcd_panel(void) 484static struct clcd_panel *versatile_clcd_panel(void)
485{ 485{
486 unsigned long sys_clcd = IO_ADDRESS(VERSATILE_SYS_BASE) + VERSATILE_SYS_CLCD_OFFSET; 486 void __iomem *sys_clcd = __io_address(VERSATILE_SYS_BASE) + VERSATILE_SYS_CLCD_OFFSET;
487 struct clcd_panel *panel = &vga; 487 struct clcd_panel *panel = &vga;
488 u32 val; 488 u32 val;
489 489
@@ -510,7 +510,7 @@ static struct clcd_panel *versatile_clcd_panel(void)
510 */ 510 */
511static void versatile_clcd_disable(struct clcd_fb *fb) 511static void versatile_clcd_disable(struct clcd_fb *fb)
512{ 512{
513 unsigned long sys_clcd = IO_ADDRESS(VERSATILE_SYS_BASE) + VERSATILE_SYS_CLCD_OFFSET; 513 void __iomem *sys_clcd = __io_address(VERSATILE_SYS_BASE) + VERSATILE_SYS_CLCD_OFFSET;
514 u32 val; 514 u32 val;
515 515
516 val = readl(sys_clcd); 516 val = readl(sys_clcd);
@@ -522,7 +522,7 @@ static void versatile_clcd_disable(struct clcd_fb *fb)
522 * If the LCD is Sanyo 2x5 in on the IB2 board, turn the back-light off 522 * If the LCD is Sanyo 2x5 in on the IB2 board, turn the back-light off
523 */ 523 */
524 if (fb->panel == &sanyo_2_5_in) { 524 if (fb->panel == &sanyo_2_5_in) {
525 unsigned long versatile_ib2_ctrl = IO_ADDRESS(VERSATILE_IB2_CTRL); 525 void __iomem *versatile_ib2_ctrl = __io_address(VERSATILE_IB2_CTRL);
526 unsigned long ctrl; 526 unsigned long ctrl;
527 527
528 ctrl = readl(versatile_ib2_ctrl); 528 ctrl = readl(versatile_ib2_ctrl);
@@ -537,7 +537,7 @@ static void versatile_clcd_disable(struct clcd_fb *fb)
537 */ 537 */
538static void versatile_clcd_enable(struct clcd_fb *fb) 538static void versatile_clcd_enable(struct clcd_fb *fb)
539{ 539{
540 unsigned long sys_clcd = IO_ADDRESS(VERSATILE_SYS_BASE) + VERSATILE_SYS_CLCD_OFFSET; 540 void __iomem *sys_clcd = __io_address(VERSATILE_SYS_BASE) + VERSATILE_SYS_CLCD_OFFSET;
541 u32 val; 541 u32 val;
542 542
543 val = readl(sys_clcd); 543 val = readl(sys_clcd);
@@ -571,7 +571,7 @@ static void versatile_clcd_enable(struct clcd_fb *fb)
571 * If the LCD is Sanyo 2x5 in on the IB2 board, turn the back-light on 571 * If the LCD is Sanyo 2x5 in on the IB2 board, turn the back-light on
572 */ 572 */
573 if (fb->panel == &sanyo_2_5_in) { 573 if (fb->panel == &sanyo_2_5_in) {
574 unsigned long versatile_ib2_ctrl = IO_ADDRESS(VERSATILE_IB2_CTRL); 574 void __iomem *versatile_ib2_ctrl = __io_address(VERSATILE_IB2_CTRL);
575 unsigned long ctrl; 575 unsigned long ctrl;
576 576
577 ctrl = readl(versatile_ib2_ctrl); 577 ctrl = readl(versatile_ib2_ctrl);
@@ -720,7 +720,7 @@ static struct amba_device *amba_devs[] __initdata = {
720}; 720};
721 721
722#ifdef CONFIG_LEDS 722#ifdef CONFIG_LEDS
723#define VA_LEDS_BASE (IO_ADDRESS(VERSATILE_SYS_BASE) + VERSATILE_SYS_LED_OFFSET) 723#define VA_LEDS_BASE (__io_address(VERSATILE_SYS_BASE) + VERSATILE_SYS_LED_OFFSET)
724 724
725static void versatile_leds_event(led_event_t ledevt) 725static void versatile_leds_event(led_event_t ledevt)
726{ 726{
@@ -778,11 +778,11 @@ void __init versatile_init(void)
778/* 778/*
779 * Where is the timer (VA)? 779 * Where is the timer (VA)?
780 */ 780 */
781#define TIMER0_VA_BASE IO_ADDRESS(VERSATILE_TIMER0_1_BASE) 781#define TIMER0_VA_BASE __io_address(VERSATILE_TIMER0_1_BASE)
782#define TIMER1_VA_BASE (IO_ADDRESS(VERSATILE_TIMER0_1_BASE) + 0x20) 782#define TIMER1_VA_BASE (__io_address(VERSATILE_TIMER0_1_BASE) + 0x20)
783#define TIMER2_VA_BASE IO_ADDRESS(VERSATILE_TIMER2_3_BASE) 783#define TIMER2_VA_BASE __io_address(VERSATILE_TIMER2_3_BASE)
784#define TIMER3_VA_BASE (IO_ADDRESS(VERSATILE_TIMER2_3_BASE) + 0x20) 784#define TIMER3_VA_BASE (__io_address(VERSATILE_TIMER2_3_BASE) + 0x20)
785#define VA_IC_BASE IO_ADDRESS(VERSATILE_VIC_BASE) 785#define VA_IC_BASE __io_address(VERSATILE_VIC_BASE)
786 786
787/* 787/*
788 * How long is the timer interval? 788 * How long is the timer interval?
@@ -877,12 +877,12 @@ static void __init versatile_timer_init(void)
877 * VERSATILE_REFCLK is 32KHz 877 * VERSATILE_REFCLK is 32KHz
878 * VERSATILE_TIMCLK is 1MHz 878 * VERSATILE_TIMCLK is 1MHz
879 */ 879 */
880 val = readl(IO_ADDRESS(VERSATILE_SCTL_BASE)); 880 val = readl(__io_address(VERSATILE_SCTL_BASE));
881 writel((VERSATILE_TIMCLK << VERSATILE_TIMER1_EnSel) | 881 writel((VERSATILE_TIMCLK << VERSATILE_TIMER1_EnSel) |
882 (VERSATILE_TIMCLK << VERSATILE_TIMER2_EnSel) | 882 (VERSATILE_TIMCLK << VERSATILE_TIMER2_EnSel) |
883 (VERSATILE_TIMCLK << VERSATILE_TIMER3_EnSel) | 883 (VERSATILE_TIMCLK << VERSATILE_TIMER3_EnSel) |
884 (VERSATILE_TIMCLK << VERSATILE_TIMER4_EnSel) | val, 884 (VERSATILE_TIMCLK << VERSATILE_TIMER4_EnSel) | val,
885 IO_ADDRESS(VERSATILE_SCTL_BASE)); 885 __io_address(VERSATILE_SCTL_BASE));
886 886
887 /* 887 /*
888 * Initialise to a known state (all timers off) 888 * Initialise to a known state (all timers off)
diff --git a/arch/arm/mach-versatile/pci.c b/arch/arm/mach-versatile/pci.c
index d1565e851f0e..b80d57d51699 100644
--- a/arch/arm/mach-versatile/pci.c
+++ b/arch/arm/mach-versatile/pci.c
@@ -29,7 +29,6 @@
29#include <asm/irq.h> 29#include <asm/irq.h>
30#include <asm/system.h> 30#include <asm/system.h>
31#include <asm/mach/pci.h> 31#include <asm/mach/pci.h>
32#include <asm/mach-types.h>
33 32
34/* 33/*
35 * these spaces are mapped using the following base registers: 34 * these spaces are mapped using the following base registers:
diff --git a/arch/arm/mm/abort-ev6.S b/arch/arm/mm/abort-ev6.S
index 8f76f3df7b4c..dbd346033122 100644
--- a/arch/arm/mm/abort-ev6.S
+++ b/arch/arm/mm/abort-ev6.S
@@ -20,6 +20,11 @@
20 */ 20 */
21 .align 5 21 .align 5
22ENTRY(v6_early_abort) 22ENTRY(v6_early_abort)
23#ifdef CONFIG_CPU_MPCORE
24 clrex
25#else
26 strex r0, r1, [sp] @ Clear the exclusive monitor
27#endif
23 mrc p15, 0, r1, c5, c0, 0 @ get FSR 28 mrc p15, 0, r1, c5, c0, 0 @ get FSR
24 mrc p15, 0, r0, c6, c0, 0 @ get FAR 29 mrc p15, 0, r0, c6, c0, 0 @ get FAR
25/* 30/*
diff --git a/arch/arm/mm/cache-v6.S b/arch/arm/mm/cache-v6.S
index 85c10a71e7c6..72966d90e956 100644
--- a/arch/arm/mm/cache-v6.S
+++ b/arch/arm/mm/cache-v6.S
@@ -18,6 +18,7 @@
18#define HARVARD_CACHE 18#define HARVARD_CACHE
19#define CACHE_LINE_SIZE 32 19#define CACHE_LINE_SIZE 32
20#define D_CACHE_LINE_SIZE 32 20#define D_CACHE_LINE_SIZE 32
21#define BTB_FLUSH_SIZE 8
21 22
22/* 23/*
23 * v6_flush_cache_all() 24 * v6_flush_cache_all()
@@ -98,7 +99,13 @@ ENTRY(v6_coherent_user_range)
98 mcr p15, 0, r0, c7, c5, 1 @ invalidate I line 99 mcr p15, 0, r0, c7, c5, 1 @ invalidate I line
99#endif 100#endif
100 mcr p15, 0, r0, c7, c5, 7 @ invalidate BTB entry 101 mcr p15, 0, r0, c7, c5, 7 @ invalidate BTB entry
101 add r0, r0, #CACHE_LINE_SIZE 102 add r0, r0, #BTB_FLUSH_SIZE
103 mcr p15, 0, r0, c7, c5, 7 @ invalidate BTB entry
104 add r0, r0, #BTB_FLUSH_SIZE
105 mcr p15, 0, r0, c7, c5, 7 @ invalidate BTB entry
106 add r0, r0, #BTB_FLUSH_SIZE
107 mcr p15, 0, r0, c7, c5, 7 @ invalidate BTB entry
108 add r0, r0, #BTB_FLUSH_SIZE
102 cmp r0, r1 109 cmp r0, r1
103 blo 1b 110 blo 1b
104#ifdef HARVARD_CACHE 111#ifdef HARVARD_CACHE
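[Hedged sketch, not part of the patch: one way to read the cache-v6.S hunk above is that the loop still advances one 32-byte cache line per pass, but branch-target-buffer entries are invalidated at an 8-byte granule, so four BTB invalidate operations (32 / 8) are now issued per line instead of one. The equivalent coverage pattern in C, with hypothetical helpers standing in for the mcr instructions:]

#include <stdint.h>

#define EX_CACHE_LINE_SIZE 32u
#define EX_BTB_FLUSH_SIZE   8u

static void example_invalidate_btb_entry(uintptr_t addr)
{
	(void)addr;	/* stand-in for "mcr p15, 0, rX, c7, c5, 7" on this address */
}

static void example_coherent_range(uintptr_t start, uintptr_t end)
{
	for (uintptr_t line = start; line < end; line += EX_CACHE_LINE_SIZE) {
		/* cover the whole line with 8-byte BTB invalidates: 4 per 32-byte line */
		for (unsigned int off = 0; off < EX_CACHE_LINE_SIZE; off += EX_BTB_FLUSH_SIZE)
			example_invalidate_btb_entry(line + off);
	}
}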
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index b0208c992576..c9a03981b785 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -17,6 +17,24 @@
17 17
18#ifdef CONFIG_CPU_CACHE_VIPT 18#ifdef CONFIG_CPU_CACHE_VIPT
19 19
20#define ALIAS_FLUSH_START 0xffff4000
21
22#define TOP_PTE(x) pte_offset_kernel(top_pmd, x)
23
24static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
25{
26 unsigned long to = ALIAS_FLUSH_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
27
28 set_pte(TOP_PTE(to), pfn_pte(pfn, PAGE_KERNEL));
29 flush_tlb_kernel_page(to);
30
31 asm( "mcrr p15, 0, %1, %0, c14\n"
32 " mcrr p15, 0, %1, %0, c5\n"
33 :
34 : "r" (to), "r" (to + PAGE_SIZE - L1_CACHE_BYTES)
35 : "cc");
36}
37
20void flush_cache_mm(struct mm_struct *mm) 38void flush_cache_mm(struct mm_struct *mm)
21{ 39{
22 if (cache_is_vivt()) { 40 if (cache_is_vivt()) {
@@ -67,24 +85,6 @@ void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsig
67 if (cache_is_vipt_aliasing()) 85 if (cache_is_vipt_aliasing())
68 flush_pfn_alias(pfn, user_addr); 86 flush_pfn_alias(pfn, user_addr);
69} 87}
70
71#define ALIAS_FLUSH_START 0xffff4000
72
73#define TOP_PTE(x) pte_offset_kernel(top_pmd, x)
74
75static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
76{
77 unsigned long to = ALIAS_FLUSH_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
78
79 set_pte(TOP_PTE(to), pfn_pte(pfn, PAGE_KERNEL));
80 flush_tlb_kernel_page(to);
81
82 asm( "mcrr p15, 0, %1, %0, c14\n"
83 " mcrr p15, 0, %1, %0, c5\n"
84 :
85 : "r" (to), "r" (to + PAGE_SIZE - L1_CACHE_BYTES)
86 : "cc");
87}
88#else 88#else
89#define flush_pfn_alias(pfn,vaddr) do { } while (0) 89#define flush_pfn_alias(pfn,vaddr) do { } while (0)
90#endif 90#endif
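[Hedged aside, not part of the patch: the flush.c hunk above appears to be purely about ordering. flush_cache_page (kept as context higher up) calls flush_pfn_alias, so the static definition is moved ahead of its first caller instead of being left below it. A minimal sketch of the C rule involved, with hypothetical names: a static function referenced before any declaration forces either a forward declaration or exactly this kind of move.]

/* Without this forward declaration, calling example_helper() from
 * example_caller() before the definition would not compile cleanly;
 * moving the definition above the caller (as the patch does) also works. */
static void example_helper(void);

static void example_caller(void)
{
	example_helper();
}

static void example_helper(void)
{
	/* ... the actual work ... */
}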
diff --git a/arch/arm/plat-omap/common.c b/arch/arm/plat-omap/common.c
index 6cb20aea7f51..02bcc6c1cd1b 100644
--- a/arch/arm/plat-omap/common.c
+++ b/arch/arm/plat-omap/common.c
@@ -25,7 +25,6 @@
25#include <asm/mach/map.h> 25#include <asm/mach/map.h>
26#include <asm/hardware/clock.h> 26#include <asm/hardware/clock.h>
27#include <asm/io.h> 27#include <asm/io.h>
28#include <asm/mach-types.h>
29#include <asm/setup.h> 28#include <asm/setup.h>
30 29
31#include <asm/arch/board.h> 30#include <asm/arch/board.h>
diff --git a/arch/arm/plat-omap/cpu-omap.c b/arch/arm/plat-omap/cpu-omap.c
index 409aac2c4b9d..fd894bb00107 100644
--- a/arch/arm/plat-omap/cpu-omap.c
+++ b/arch/arm/plat-omap/cpu-omap.c
@@ -21,7 +21,6 @@
21#include <linux/err.h> 21#include <linux/err.h>
22 22
23#include <asm/hardware.h> 23#include <asm/hardware.h>
24#include <asm/mach-types.h>
25#include <asm/io.h> 24#include <asm/io.h>
26#include <asm/system.h> 25#include <asm/system.h>
27 26
diff --git a/arch/arm/plat-omap/usb.c b/arch/arm/plat-omap/usb.c
index 98f1c76f8660..14a836d7ac25 100644
--- a/arch/arm/plat-omap/usb.c
+++ b/arch/arm/plat-omap/usb.c
@@ -33,7 +33,6 @@
33#include <asm/irq.h> 33#include <asm/irq.h>
34#include <asm/system.h> 34#include <asm/system.h>
35#include <asm/hardware.h> 35#include <asm/hardware.h>
36#include <asm/mach-types.h>
37 36
38#include <asm/arch/mux.h> 37#include <asm/arch/mux.h>
39#include <asm/arch/usb.h> 38#include <asm/arch/usb.h>
diff --git a/arch/i386/kernel/acpi/boot.c b/arch/i386/kernel/acpi/boot.c
index a63351c085c6..b66c13c0cc0f 100644
--- a/arch/i386/kernel/acpi/boot.c
+++ b/arch/i386/kernel/acpi/boot.c
@@ -27,15 +27,14 @@
27#include <linux/config.h> 27#include <linux/config.h>
28#include <linux/acpi.h> 28#include <linux/acpi.h>
29#include <linux/efi.h> 29#include <linux/efi.h>
30#include <linux/irq.h>
31#include <linux/module.h> 30#include <linux/module.h>
32#include <linux/dmi.h> 31#include <linux/dmi.h>
32#include <linux/irq.h>
33 33
34#include <asm/pgtable.h> 34#include <asm/pgtable.h>
35#include <asm/io_apic.h> 35#include <asm/io_apic.h>
36#include <asm/apic.h> 36#include <asm/apic.h>
37#include <asm/io.h> 37#include <asm/io.h>
38#include <asm/irq.h>
39#include <asm/mpspec.h> 38#include <asm/mpspec.h>
40 39
41#ifdef CONFIG_X86_64 40#ifdef CONFIG_X86_64
diff --git a/arch/i386/kernel/apic.c b/arch/i386/kernel/apic.c
index a22a866de8f9..5546ddebec33 100644
--- a/arch/i386/kernel/apic.c
+++ b/arch/i386/kernel/apic.c
@@ -18,7 +18,6 @@
18#include <linux/init.h> 18#include <linux/init.h>
19 19
20#include <linux/mm.h> 20#include <linux/mm.h>
21#include <linux/irq.h>
22#include <linux/delay.h> 21#include <linux/delay.h>
23#include <linux/bootmem.h> 22#include <linux/bootmem.h>
24#include <linux/smp_lock.h> 23#include <linux/smp_lock.h>
diff --git a/arch/i386/kernel/cpu/amd.c b/arch/i386/kernel/cpu/amd.c
index 73aeaf5a9d4e..4c1ddf2b57cc 100644
--- a/arch/i386/kernel/cpu/amd.c
+++ b/arch/i386/kernel/cpu/amd.c
@@ -28,6 +28,22 @@ static void __init init_amd(struct cpuinfo_x86 *c)
28 int mbytes = num_physpages >> (20-PAGE_SHIFT); 28 int mbytes = num_physpages >> (20-PAGE_SHIFT);
29 int r; 29 int r;
30 30
31#ifdef CONFIG_SMP
32 unsigned long value;
33
34 /* Disable TLB flush filter by setting HWCR.FFDIS on K8
35 * bit 6 of msr C001_0015
36 *
37 * Errata 63 for SH-B3 steppings
38 * Errata 122 for all steppings (F+ have it disabled by default)
39 */
40 if (c->x86 == 15) {
41 rdmsrl(MSR_K7_HWCR, value);
42 value |= 1 << 6;
43 wrmsrl(MSR_K7_HWCR, value);
44 }
45#endif
46
31 /* 47 /*
32 * FIXME: We should handle the K5 here. Set up the write 48 * FIXME: We should handle the K5 here. Set up the write
33 * range and also turn on MSR 83 bits 4 and 31 (write alloc, 49 * range and also turn on MSR 83 bits 4 and 31 (write alloc,
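[Hedged aside, not part of the patch: the amd.c hunk above, per its own comment, disables the K8 TLB flush filter by setting HWCR.FFDIS (bit 6 of MSR C001_0015) on family 15 parts, using a read-modify-write of the MSR. An illustration of that bit-set pattern in plain C, with an illustrative constant in place of the kernel's rdmsrl/wrmsrl accessors:]

#include <stdint.h>

#define EX_HWCR_FFDIS_BIT 6	/* bit 6: disable the TLB flush filter (illustrative) */

static uint64_t example_set_ffdis(uint64_t hwcr)
{
	/* read-modify-write: only bit 6 is set, every other bit is preserved */
	return hwcr | (1ULL << EX_HWCR_FFDIS_BIT);
}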
diff --git a/arch/i386/kernel/cpu/mcheck/k7.c b/arch/i386/kernel/cpu/mcheck/k7.c
index c4abe7657397..7c6b9c73522f 100644
--- a/arch/i386/kernel/cpu/mcheck/k7.c
+++ b/arch/i386/kernel/cpu/mcheck/k7.c
@@ -7,7 +7,6 @@
7#include <linux/types.h> 7#include <linux/types.h>
8#include <linux/kernel.h> 8#include <linux/kernel.h>
9#include <linux/config.h> 9#include <linux/config.h>
10#include <linux/irq.h>
11#include <linux/interrupt.h> 10#include <linux/interrupt.h>
12#include <linux/smp.h> 11#include <linux/smp.h>
13 12
diff --git a/arch/i386/kernel/cpu/mcheck/non-fatal.c b/arch/i386/kernel/cpu/mcheck/non-fatal.c
index 7864ddfccf07..82dffe0d4954 100644
--- a/arch/i386/kernel/cpu/mcheck/non-fatal.c
+++ b/arch/i386/kernel/cpu/mcheck/non-fatal.c
@@ -12,7 +12,6 @@
12#include <linux/kernel.h> 12#include <linux/kernel.h>
13#include <linux/jiffies.h> 13#include <linux/jiffies.h>
14#include <linux/config.h> 14#include <linux/config.h>
15#include <linux/irq.h>
16#include <linux/workqueue.h> 15#include <linux/workqueue.h>
17#include <linux/interrupt.h> 16#include <linux/interrupt.h>
18#include <linux/smp.h> 17#include <linux/smp.h>
diff --git a/arch/i386/kernel/cpu/mcheck/p4.c b/arch/i386/kernel/cpu/mcheck/p4.c
index 0abccb6fdf9e..1d1e885f500a 100644
--- a/arch/i386/kernel/cpu/mcheck/p4.c
+++ b/arch/i386/kernel/cpu/mcheck/p4.c
@@ -6,7 +6,6 @@
6#include <linux/types.h> 6#include <linux/types.h>
7#include <linux/kernel.h> 7#include <linux/kernel.h>
8#include <linux/config.h> 8#include <linux/config.h>
9#include <linux/irq.h>
10#include <linux/interrupt.h> 9#include <linux/interrupt.h>
11#include <linux/smp.h> 10#include <linux/smp.h>
12 11
diff --git a/arch/i386/kernel/cpu/mcheck/p5.c b/arch/i386/kernel/cpu/mcheck/p5.c
index ec0614cd2925..3a2e24baddc7 100644
--- a/arch/i386/kernel/cpu/mcheck/p5.c
+++ b/arch/i386/kernel/cpu/mcheck/p5.c
@@ -6,7 +6,6 @@
6#include <linux/init.h> 6#include <linux/init.h>
7#include <linux/types.h> 7#include <linux/types.h>
8#include <linux/kernel.h> 8#include <linux/kernel.h>
9#include <linux/irq.h>
10#include <linux/interrupt.h> 9#include <linux/interrupt.h>
11#include <linux/smp.h> 10#include <linux/smp.h>
12 11
diff --git a/arch/i386/kernel/cpu/mcheck/p6.c b/arch/i386/kernel/cpu/mcheck/p6.c
index f01b73f947e1..3c035b8fa3d9 100644
--- a/arch/i386/kernel/cpu/mcheck/p6.c
+++ b/arch/i386/kernel/cpu/mcheck/p6.c
@@ -6,7 +6,6 @@
6#include <linux/init.h> 6#include <linux/init.h>
7#include <linux/types.h> 7#include <linux/types.h>
8#include <linux/kernel.h> 8#include <linux/kernel.h>
9#include <linux/irq.h>
10#include <linux/interrupt.h> 9#include <linux/interrupt.h>
11#include <linux/smp.h> 10#include <linux/smp.h>
12 11
diff --git a/arch/i386/kernel/cpu/mcheck/winchip.c b/arch/i386/kernel/cpu/mcheck/winchip.c
index 7bae68fa168f..5b9d2dd411d3 100644
--- a/arch/i386/kernel/cpu/mcheck/winchip.c
+++ b/arch/i386/kernel/cpu/mcheck/winchip.c
@@ -6,7 +6,6 @@
6#include <linux/init.h> 6#include <linux/init.h>
7#include <linux/types.h> 7#include <linux/types.h>
8#include <linux/kernel.h> 8#include <linux/kernel.h>
9#include <linux/irq.h>
10#include <linux/interrupt.h> 9#include <linux/interrupt.h>
11 10
12#include <asm/processor.h> 11#include <asm/processor.h>
diff --git a/arch/i386/kernel/crash.c b/arch/i386/kernel/crash.c
index 913be77bb844..0248e084017c 100644
--- a/arch/i386/kernel/crash.c
+++ b/arch/i386/kernel/crash.c
@@ -11,10 +11,8 @@
11#include <linux/types.h> 11#include <linux/types.h>
12#include <linux/kernel.h> 12#include <linux/kernel.h>
13#include <linux/smp.h> 13#include <linux/smp.h>
14#include <linux/irq.h>
15#include <linux/reboot.h> 14#include <linux/reboot.h>
16#include <linux/kexec.h> 15#include <linux/kexec.h>
17#include <linux/irq.h>
18#include <linux/delay.h> 16#include <linux/delay.h>
19#include <linux/elf.h> 17#include <linux/elf.h>
20#include <linux/elfcore.h> 18#include <linux/elfcore.h>
diff --git a/arch/i386/kernel/i8259.c b/arch/i386/kernel/i8259.c
index 178f4e9bac9d..323ef8ab3244 100644
--- a/arch/i386/kernel/i8259.c
+++ b/arch/i386/kernel/i8259.c
@@ -16,7 +16,6 @@
16#include <asm/atomic.h> 16#include <asm/atomic.h>
17#include <asm/system.h> 17#include <asm/system.h>
18#include <asm/io.h> 18#include <asm/io.h>
19#include <asm/irq.h>
20#include <asm/timer.h> 19#include <asm/timer.h>
21#include <asm/pgtable.h> 20#include <asm/pgtable.h>
22#include <asm/delay.h> 21#include <asm/delay.h>
@@ -25,8 +24,6 @@
25#include <asm/arch_hooks.h> 24#include <asm/arch_hooks.h>
26#include <asm/i8259.h> 25#include <asm/i8259.h>
27 26
28#include <linux/irq.h>
29
30#include <io_ports.h> 27#include <io_ports.h>
31 28
32/* 29/*
diff --git a/arch/i386/kernel/io_apic.c b/arch/i386/kernel/io_apic.c
index 378313b0cce9..fb3991e8229e 100644
--- a/arch/i386/kernel/io_apic.c
+++ b/arch/i386/kernel/io_apic.c
@@ -21,7 +21,6 @@
21 */ 21 */
22 22
23#include <linux/mm.h> 23#include <linux/mm.h>
24#include <linux/irq.h>
25#include <linux/interrupt.h> 24#include <linux/interrupt.h>
26#include <linux/init.h> 25#include <linux/init.h>
27#include <linux/delay.h> 26#include <linux/delay.h>
diff --git a/arch/i386/kernel/mpparse.c b/arch/i386/kernel/mpparse.c
index 15949fd08109..27aabfceb67e 100644
--- a/arch/i386/kernel/mpparse.c
+++ b/arch/i386/kernel/mpparse.c
@@ -14,7 +14,6 @@
14 */ 14 */
15 15
16#include <linux/mm.h> 16#include <linux/mm.h>
17#include <linux/irq.h>
18#include <linux/init.h> 17#include <linux/init.h>
19#include <linux/acpi.h> 18#include <linux/acpi.h>
20#include <linux/delay.h> 19#include <linux/delay.h>
diff --git a/arch/i386/kernel/nmi.c b/arch/i386/kernel/nmi.c
index 0178457db721..72515b8a1b12 100644
--- a/arch/i386/kernel/nmi.c
+++ b/arch/i386/kernel/nmi.c
@@ -15,7 +15,6 @@
15 15
16#include <linux/config.h> 16#include <linux/config.h>
17#include <linux/mm.h> 17#include <linux/mm.h>
18#include <linux/irq.h>
19#include <linux/delay.h> 18#include <linux/delay.h>
20#include <linux/bootmem.h> 19#include <linux/bootmem.h>
21#include <linux/smp_lock.h> 20#include <linux/smp_lock.h>
diff --git a/arch/i386/kernel/process.c b/arch/i386/kernel/process.c
index b45cbf93d439..7a14fdfd3af9 100644
--- a/arch/i386/kernel/process.c
+++ b/arch/i386/kernel/process.c
@@ -47,13 +47,11 @@
47#include <asm/ldt.h> 47#include <asm/ldt.h>
48#include <asm/processor.h> 48#include <asm/processor.h>
49#include <asm/i387.h> 49#include <asm/i387.h>
50#include <asm/irq.h>
51#include <asm/desc.h> 50#include <asm/desc.h>
52#ifdef CONFIG_MATH_EMULATION 51#ifdef CONFIG_MATH_EMULATION
53#include <asm/math_emu.h> 52#include <asm/math_emu.h>
54#endif 53#endif
55 54
56#include <linux/irq.h>
57#include <linux/err.h> 55#include <linux/err.h>
58 56
59#include <asm/tlbflush.h> 57#include <asm/tlbflush.h>
diff --git a/arch/i386/kernel/smp.c b/arch/i386/kernel/smp.c
index 48b55db3680f..218d725a5a1e 100644
--- a/arch/i386/kernel/smp.c
+++ b/arch/i386/kernel/smp.c
@@ -11,7 +11,6 @@
11#include <linux/init.h> 11#include <linux/init.h>
12 12
13#include <linux/mm.h> 13#include <linux/mm.h>
14#include <linux/irq.h>
15#include <linux/delay.h> 14#include <linux/delay.h>
16#include <linux/spinlock.h> 15#include <linux/spinlock.h>
17#include <linux/smp_lock.h> 16#include <linux/smp_lock.h>
diff --git a/arch/i386/kernel/smpboot.c b/arch/i386/kernel/smpboot.c
index 5f0a95d76a4f..1fb26d0e30b6 100644
--- a/arch/i386/kernel/smpboot.c
+++ b/arch/i386/kernel/smpboot.c
@@ -42,7 +42,6 @@
42#include <linux/sched.h> 42#include <linux/sched.h>
43#include <linux/kernel_stat.h> 43#include <linux/kernel_stat.h>
44#include <linux/smp_lock.h> 44#include <linux/smp_lock.h>
45#include <linux/irq.h>
46#include <linux/bootmem.h> 45#include <linux/bootmem.h>
47#include <linux/notifier.h> 46#include <linux/notifier.h>
48#include <linux/cpu.h> 47#include <linux/cpu.h>
diff --git a/arch/i386/kernel/timers/timer_pit.c b/arch/i386/kernel/timers/timer_pit.c
index eddb64038234..e42e46d35159 100644
--- a/arch/i386/kernel/timers/timer_pit.c
+++ b/arch/i386/kernel/timers/timer_pit.c
@@ -6,7 +6,6 @@
6#include <linux/spinlock.h> 6#include <linux/spinlock.h>
7#include <linux/module.h> 7#include <linux/module.h>
8#include <linux/device.h> 8#include <linux/device.h>
9#include <linux/irq.h>
10#include <linux/sysdev.h> 9#include <linux/sysdev.h>
11#include <linux/timex.h> 10#include <linux/timex.h>
12#include <asm/delay.h> 11#include <asm/delay.h>
diff --git a/arch/i386/kernel/traps.c b/arch/i386/kernel/traps.c
index 431a551e46ea..19e90bdd84ea 100644
--- a/arch/i386/kernel/traps.c
+++ b/arch/i386/kernel/traps.c
@@ -52,7 +52,6 @@
52#include <asm/arch_hooks.h> 52#include <asm/arch_hooks.h>
53#include <asm/kdebug.h> 53#include <asm/kdebug.h>
54 54
55#include <linux/irq.h>
56#include <linux/module.h> 55#include <linux/module.h>
57 56
58#include "mach_traps.h" 57#include "mach_traps.h"
diff --git a/arch/i386/mach-default/setup.c b/arch/i386/mach-default/setup.c
index e5a1a83d09ef..b4a7455c6993 100644
--- a/arch/i386/mach-default/setup.c
+++ b/arch/i386/mach-default/setup.c
@@ -5,7 +5,6 @@
5#include <linux/config.h> 5#include <linux/config.h>
6#include <linux/smp.h> 6#include <linux/smp.h>
7#include <linux/init.h> 7#include <linux/init.h>
8#include <linux/irq.h>
9#include <linux/interrupt.h> 8#include <linux/interrupt.h>
10#include <asm/acpi.h> 9#include <asm/acpi.h>
11#include <asm/arch_hooks.h> 10#include <asm/arch_hooks.h>
diff --git a/arch/i386/mach-visws/setup.c b/arch/i386/mach-visws/setup.c
index 26ada6fc0d77..07fac7e749c7 100644
--- a/arch/i386/mach-visws/setup.c
+++ b/arch/i386/mach-visws/setup.c
@@ -5,7 +5,6 @@
5 5
6#include <linux/smp.h> 6#include <linux/smp.h>
7#include <linux/init.h> 7#include <linux/init.h>
8#include <linux/irq.h>
9#include <linux/interrupt.h> 8#include <linux/interrupt.h>
10 9
11#include <asm/fixmap.h> 10#include <asm/fixmap.h>
diff --git a/arch/i386/mach-visws/visws_apic.c b/arch/i386/mach-visws/visws_apic.c
index 04e6585849a2..3e64fb721291 100644
--- a/arch/i386/mach-visws/visws_apic.c
+++ b/arch/i386/mach-visws/visws_apic.c
@@ -19,7 +19,6 @@
19#include <linux/config.h> 19#include <linux/config.h>
20#include <linux/kernel_stat.h> 20#include <linux/kernel_stat.h>
21#include <linux/interrupt.h> 21#include <linux/interrupt.h>
22#include <linux/irq.h>
23#include <linux/smp_lock.h> 22#include <linux/smp_lock.h>
24#include <linux/init.h> 23#include <linux/init.h>
25 24
diff --git a/arch/i386/mach-voyager/setup.c b/arch/i386/mach-voyager/setup.c
index df123fc487bb..7d8a3acb9441 100644
--- a/arch/i386/mach-voyager/setup.c
+++ b/arch/i386/mach-voyager/setup.c
@@ -4,7 +4,6 @@
4 4
5#include <linux/config.h> 5#include <linux/config.h>
6#include <linux/init.h> 6#include <linux/init.h>
7#include <linux/irq.h>
8#include <linux/interrupt.h> 7#include <linux/interrupt.h>
9#include <asm/acpi.h> 8#include <asm/acpi.h>
10#include <asm/arch_hooks.h> 9#include <asm/arch_hooks.h>
diff --git a/arch/i386/mach-voyager/voyager_basic.c b/arch/i386/mach-voyager/voyager_basic.c
index cc69875d979b..aa49a33a572c 100644
--- a/arch/i386/mach-voyager/voyager_basic.c
+++ b/arch/i386/mach-voyager/voyager_basic.c
@@ -27,7 +27,6 @@
27#include <asm/voyager.h> 27#include <asm/voyager.h>
28#include <asm/vic.h> 28#include <asm/vic.h>
29#include <linux/pm.h> 29#include <linux/pm.h>
30#include <linux/irq.h>
31#include <asm/tlbflush.h> 30#include <asm/tlbflush.h>
32#include <asm/arch_hooks.h> 31#include <asm/arch_hooks.h>
33#include <asm/i8253.h> 32#include <asm/i8253.h>
diff --git a/arch/i386/mach-voyager/voyager_smp.c b/arch/i386/mach-voyager/voyager_smp.c
index 46b0cf4a31e0..72a1b9cae2e4 100644
--- a/arch/i386/mach-voyager/voyager_smp.c
+++ b/arch/i386/mach-voyager/voyager_smp.c
@@ -30,8 +30,6 @@
30#include <asm/tlbflush.h> 30#include <asm/tlbflush.h>
31#include <asm/arch_hooks.h> 31#include <asm/arch_hooks.h>
32 32
33#include <linux/irq.h>
34
35/* TLB state -- visible externally, indexed physically */ 33/* TLB state -- visible externally, indexed physically */
36DEFINE_PER_CPU(struct tlb_state, cpu_tlbstate) ____cacheline_aligned = { &init_mm, 0 }; 34DEFINE_PER_CPU(struct tlb_state, cpu_tlbstate) ____cacheline_aligned = { &init_mm, 0 };
37 35
diff --git a/arch/i386/mach-voyager/voyager_thread.c b/arch/i386/mach-voyager/voyager_thread.c
index a9341b0eebff..2b03884fdb2a 100644
--- a/arch/i386/mach-voyager/voyager_thread.c
+++ b/arch/i386/mach-voyager/voyager_thread.c
@@ -31,8 +31,6 @@
31#include <asm/mtrr.h> 31#include <asm/mtrr.h>
32#include <asm/msr.h> 32#include <asm/msr.h>
33 33
34#include <linux/irq.h>
35
36#define THREAD_NAME "kvoyagerd" 34#define THREAD_NAME "kvoyagerd"
37 35
38/* external variables */ 36/* external variables */
diff --git a/arch/i386/oprofile/nmi_timer_int.c b/arch/i386/oprofile/nmi_timer_int.c
index ad93cdd55d63..930a1127bb30 100644
--- a/arch/i386/oprofile/nmi_timer_int.c
+++ b/arch/i386/oprofile/nmi_timer_int.c
@@ -9,7 +9,7 @@
9 9
10#include <linux/init.h> 10#include <linux/init.h>
11#include <linux/smp.h> 11#include <linux/smp.h>
12#include <linux/irq.h> 12#include <linux/errno.h>
13#include <linux/oprofile.h> 13#include <linux/oprofile.h>
14#include <linux/rcupdate.h> 14#include <linux/rcupdate.h>
15 15
diff --git a/arch/i386/pci/acpi.c b/arch/i386/pci/acpi.c
index 2941674f35eb..4c4522b43be5 100644
--- a/arch/i386/pci/acpi.c
+++ b/arch/i386/pci/acpi.c
@@ -2,7 +2,6 @@
2#include <linux/acpi.h> 2#include <linux/acpi.h>
3#include <linux/init.h> 3#include <linux/init.h>
4#include <linux/irq.h> 4#include <linux/irq.h>
5#include <asm/hw_irq.h>
6#include <asm/numa.h> 5#include <asm/numa.h>
7#include "pci.h" 6#include "pci.h"
8 7
diff --git a/arch/i386/pci/irq.c b/arch/i386/pci/irq.c
index 326a2edc3834..cddafe33ff7c 100644
--- a/arch/i386/pci/irq.c
+++ b/arch/i386/pci/irq.c
@@ -11,12 +11,11 @@
11#include <linux/init.h> 11#include <linux/init.h>
12#include <linux/slab.h> 12#include <linux/slab.h>
13#include <linux/interrupt.h> 13#include <linux/interrupt.h>
14#include <linux/irq.h>
15#include <linux/dmi.h> 14#include <linux/dmi.h>
16#include <asm/io.h> 15#include <asm/io.h>
17#include <asm/smp.h> 16#include <asm/smp.h>
18#include <asm/io_apic.h> 17#include <asm/io_apic.h>
19#include <asm/hw_irq.h> 18#include <linux/irq.h>
20#include <linux/acpi.h> 19#include <linux/acpi.h>
21 20
22#include "pci.h" 21#include "pci.h"
diff --git a/arch/i386/power/cpu.c b/arch/i386/power/cpu.c
index 7b0b9ad848e5..b27c5acc79d0 100644
--- a/arch/i386/power/cpu.c
+++ b/arch/i386/power/cpu.c
@@ -8,25 +8,8 @@
8 */ 8 */
9 9
10#include <linux/config.h> 10#include <linux/config.h>
11#include <linux/kernel.h>
12#include <linux/module.h> 11#include <linux/module.h>
13#include <linux/init.h>
14#include <linux/types.h>
15#include <linux/spinlock.h>
16#include <linux/poll.h>
17#include <linux/delay.h>
18#include <linux/sysrq.h>
19#include <linux/proc_fs.h>
20#include <linux/irq.h>
21#include <linux/pm.h>
22#include <linux/device.h>
23#include <linux/suspend.h> 12#include <linux/suspend.h>
24#include <linux/acpi.h>
25
26#include <asm/uaccess.h>
27#include <asm/acpi.h>
28#include <asm/tlbflush.h>
29#include <asm/processor.h>
30 13
31static struct saved_context saved_context; 14static struct saved_context saved_context;
32 15
diff --git a/arch/ia64/hp/sim/simscsi.c b/arch/ia64/hp/sim/simscsi.c
index 56405dbfd739..a18983a3c934 100644
--- a/arch/ia64/hp/sim/simscsi.c
+++ b/arch/ia64/hp/sim/simscsi.c
@@ -233,6 +233,23 @@ simscsi_readwrite10 (struct scsi_cmnd *sc, int mode)
233 simscsi_readwrite(sc, mode, offset, ((sc->cmnd[7] << 8) | sc->cmnd[8])*512); 233 simscsi_readwrite(sc, mode, offset, ((sc->cmnd[7] << 8) | sc->cmnd[8])*512);
234} 234}
235 235
236static void simscsi_fillresult(struct scsi_cmnd *sc, char *buf, unsigned len)
237{
238
239 int scatterlen = sc->use_sg;
240 struct scatterlist *slp;
241
242 if (scatterlen == 0)
243 memcpy(sc->request_buffer, buf, len);
244 else for (slp = (struct scatterlist *)sc->buffer; scatterlen-- > 0 && len > 0; slp++) {
245 unsigned thislen = min(len, slp->length);
246
247 memcpy(page_address(slp->page) + slp->offset, buf, thislen);
248 slp++;
249 len -= thislen;
250 }
251}
252
236static int 253static int
237simscsi_queuecommand (struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *)) 254simscsi_queuecommand (struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
238{ 255{
@@ -240,6 +257,7 @@ simscsi_queuecommand (struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
240 char fname[MAX_ROOT_LEN+16]; 257 char fname[MAX_ROOT_LEN+16];
241 size_t disk_size; 258 size_t disk_size;
242 char *buf; 259 char *buf;
260 char localbuf[36];
243#if DEBUG_SIMSCSI 261#if DEBUG_SIMSCSI
244 register long sp asm ("sp"); 262 register long sp asm ("sp");
245 263
@@ -263,7 +281,7 @@ simscsi_queuecommand (struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
263 /* disk doesn't exist... */ 281 /* disk doesn't exist... */
264 break; 282 break;
265 } 283 }
266 buf = sc->request_buffer; 284 buf = localbuf;
267 buf[0] = 0; /* magnetic disk */ 285 buf[0] = 0; /* magnetic disk */
268 buf[1] = 0; /* not a removable medium */ 286 buf[1] = 0; /* not a removable medium */
269 buf[2] = 2; /* SCSI-2 compliant device */ 287 buf[2] = 2; /* SCSI-2 compliant device */
@@ -273,6 +291,7 @@ simscsi_queuecommand (struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
273 buf[6] = 0; /* reserved */ 291 buf[6] = 0; /* reserved */
274 buf[7] = 0; /* various flags */ 292 buf[7] = 0; /* various flags */
275 memcpy(buf + 8, "HP SIMULATED DISK 0.00", 28); 293 memcpy(buf + 8, "HP SIMULATED DISK 0.00", 28);
294 simscsi_fillresult(sc, buf, 36);
276 sc->result = GOOD; 295 sc->result = GOOD;
277 break; 296 break;
278 297
@@ -304,16 +323,13 @@ simscsi_queuecommand (struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
304 simscsi_readwrite10(sc, SSC_WRITE); 323 simscsi_readwrite10(sc, SSC_WRITE);
305 break; 324 break;
306 325
307
308 case READ_CAPACITY: 326 case READ_CAPACITY:
309 if (desc[target_id] < 0 || sc->request_bufflen < 8) { 327 if (desc[target_id] < 0 || sc->request_bufflen < 8) {
310 break; 328 break;
311 } 329 }
312 buf = sc->request_buffer; 330 buf = localbuf;
313
314 disk_size = simscsi_get_disk_size(desc[target_id]); 331 disk_size = simscsi_get_disk_size(desc[target_id]);
315 332
316 /* pretend to be a 1GB disk (partition table contains real stuff): */
317 buf[0] = (disk_size >> 24) & 0xff; 333 buf[0] = (disk_size >> 24) & 0xff;
318 buf[1] = (disk_size >> 16) & 0xff; 334 buf[1] = (disk_size >> 16) & 0xff;
319 buf[2] = (disk_size >> 8) & 0xff; 335 buf[2] = (disk_size >> 8) & 0xff;
@@ -323,13 +339,14 @@ simscsi_queuecommand (struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
323 buf[5] = 0; 339 buf[5] = 0;
324 buf[6] = 2; 340 buf[6] = 2;
325 buf[7] = 0; 341 buf[7] = 0;
342 simscsi_fillresult(sc, buf, 8);
326 sc->result = GOOD; 343 sc->result = GOOD;
327 break; 344 break;
328 345
329 case MODE_SENSE: 346 case MODE_SENSE:
330 case MODE_SENSE_10: 347 case MODE_SENSE_10:
331 /* sd.c uses this to determine whether disk does write-caching. */ 348 /* sd.c uses this to determine whether disk does write-caching. */
332 memset(sc->request_buffer, 0, 128); 349 simscsi_fillresult(sc, (char *)empty_zero_page, sc->request_bufflen);
333 sc->result = GOOD; 350 sc->result = GOOD;
334 break; 351 break;
335 352
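
The simscsi change above stops writing INQUIRY, READ CAPACITY and MODE SENSE replies straight into sc->request_buffer and instead builds them elsewhere and hands them to simscsi_fillresult(), which copies into either the flat request buffer or the scatter-gather list. A rough, self-contained sketch of that copy-into-flat-buffer-or-segment-list idea (all names and types below are illustrative, not the driver's):

#include <stdio.h>
#include <string.h>

/* Illustrative stand-ins for a flat buffer vs. a list of segments. */
struct segment {
    char     *base;
    unsigned  length;
};

/* Copy len bytes of src into either one flat buffer or a segment list,
 * never writing past any segment's length. */
static void fill_result(char *flat, struct segment *segs, int nsegs,
                        const char *src, unsigned len)
{
    if (nsegs == 0) {
        memcpy(flat, src, len);
        return;
    }
    for (; nsegs-- > 0 && len > 0; segs++) {
        unsigned thislen = len < segs->length ? len : segs->length;

        memcpy(segs->base, src, thislen);
        src += thislen;
        len -= thislen;
    }
}

int main(void)
{
    char a[4], b[16];
    struct segment segs[] = { { a, sizeof(a) }, { b, sizeof(b) } };
    const char reply[] = "HP SIMULATED DISK";

    fill_result(NULL, segs, 2, reply, sizeof(reply));
    printf("%.4s | %s\n", a, b);
    return 0;
}
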
diff --git a/arch/ia64/kernel/mca_asm.S b/arch/ia64/kernel/mca_asm.S
index 499a065f4e60..db32fc1d3935 100644
--- a/arch/ia64/kernel/mca_asm.S
+++ b/arch/ia64/kernel/mca_asm.S
@@ -489,24 +489,27 @@ ia64_state_save:
489 ;; 489 ;;
490 st8 [temp1]=r17,16 // pal_min_state 490 st8 [temp1]=r17,16 // pal_min_state
491 st8 [temp2]=r6,16 // prev_IA64_KR_CURRENT 491 st8 [temp2]=r6,16 // prev_IA64_KR_CURRENT
492 mov r6=IA64_KR(CURRENT_STACK)
493 ;;
494 st8 [temp1]=r6,16 // prev_IA64_KR_CURRENT_STACK
495 st8 [temp2]=r0,16 // prev_task, starts off as NULL
492 mov r6=cr.ifa 496 mov r6=cr.ifa
493 ;; 497 ;;
494 st8 [temp1]=r0,16 // prev_task, starts off as NULL 498 st8 [temp1]=r12,16 // cr.isr
495 st8 [temp2]=r12,16 // cr.isr 499 st8 [temp2]=r6,16 // cr.ifa
496 mov r12=cr.itir 500 mov r12=cr.itir
497 ;; 501 ;;
498 st8 [temp1]=r6,16 // cr.ifa 502 st8 [temp1]=r12,16 // cr.itir
499 st8 [temp2]=r12,16 // cr.itir 503 st8 [temp2]=r11,16 // cr.iipa
500 mov r12=cr.iim 504 mov r12=cr.iim
501 ;; 505 ;;
502 st8 [temp1]=r11,16 // cr.iipa 506 st8 [temp1]=r12,16 // cr.iim
503 st8 [temp2]=r12,16 // cr.iim
504 mov r6=cr.iha
505(p1) mov r12=IA64_MCA_COLD_BOOT 507(p1) mov r12=IA64_MCA_COLD_BOOT
506(p2) mov r12=IA64_INIT_WARM_BOOT 508(p2) mov r12=IA64_INIT_WARM_BOOT
509 mov r6=cr.iha
507 ;; 510 ;;
508 st8 [temp1]=r6,16 // cr.iha 511 st8 [temp2]=r6,16 // cr.iha
509 st8 [temp2]=r12 // os_status, default is cold boot 512 st8 [temp1]=r12 // os_status, default is cold boot
510 mov r6=IA64_MCA_SAME_CONTEXT 513 mov r6=IA64_MCA_SAME_CONTEXT
511 ;; 514 ;;
512 st8 [temp1]=r6 // context, default is same context 515 st8 [temp1]=r6 // context, default is same context
@@ -823,9 +826,12 @@ ia64_state_restore:
823 ld8 r12=[temp1],16 // sal_ra 826 ld8 r12=[temp1],16 // sal_ra
824 ld8 r9=[temp2],16 // sal_gp 827 ld8 r9=[temp2],16 // sal_gp
825 ;; 828 ;;
826 ld8 r22=[temp1],24 // pal_min_state, virtual. skip prev_task 829 ld8 r22=[temp1],16 // pal_min_state, virtual
827 ld8 r21=[temp2],16 // prev_IA64_KR_CURRENT 830 ld8 r21=[temp2],16 // prev_IA64_KR_CURRENT
828 ;; 831 ;;
832 ld8 r16=[temp1],16 // prev_IA64_KR_CURRENT_STACK
833 ld8 r20=[temp2],16 // prev_task
834 ;;
829 ld8 temp3=[temp1],16 // cr.isr 835 ld8 temp3=[temp1],16 // cr.isr
830 ld8 temp4=[temp2],16 // cr.ifa 836 ld8 temp4=[temp2],16 // cr.ifa
831 ;; 837 ;;
@@ -846,6 +852,45 @@ ia64_state_restore:
846 ld8 r8=[temp1] // os_status 852 ld8 r8=[temp1] // os_status
847 ld8 r10=[temp2] // context 853 ld8 r10=[temp2] // context
848 854
855 /* Wire IA64_TR_CURRENT_STACK to the stack that we are resuming to. To
856 * avoid any dependencies on the algorithm in ia64_switch_to(), just
857 * purge any existing CURRENT_STACK mapping and insert the new one.
858 *
859 * r16 contains prev_IA64_KR_CURRENT_STACK, r21 contains
860 * prev_IA64_KR_CURRENT, these values may have been changed by the C
861 * code. Do not use r8, r9, r10, r22, they contain values ready for
862 * the return to SAL.
863 */
864
865 mov r15=IA64_KR(CURRENT_STACK) // physical granule mapped by IA64_TR_CURRENT_STACK
866 ;;
867 shl r15=r15,IA64_GRANULE_SHIFT
868 ;;
869 dep r15=-1,r15,61,3 // virtual granule
870 mov r18=IA64_GRANULE_SHIFT<<2 // for cr.itir.ps
871 ;;
872 ptr.d r15,r18
873 ;;
874 srlz.d
875
876 extr.u r19=r21,61,3 // r21 = prev_IA64_KR_CURRENT
877 shl r20=r16,IA64_GRANULE_SHIFT // r16 = prev_IA64_KR_CURRENT_STACK
878 movl r21=PAGE_KERNEL // page properties
879 ;;
880 mov IA64_KR(CURRENT_STACK)=r16
881 cmp.ne p6,p0=RGN_KERNEL,r19 // new stack is in the kernel region?
882 or r21=r20,r21 // construct PA | page properties
883(p6) br.spnt 1f // the dreaded cpu 0 idle task in region 5:(
884 ;;
885 mov cr.itir=r18
886 mov cr.ifa=r21
887 mov r20=IA64_TR_CURRENT_STACK
888 ;;
889 itr.d dtr[r20]=r21
890 ;;
891 srlz.d
8921:
893
849 br.sptk b0 894 br.sptk b0
850 895
851//EndStub////////////////////////////////////////////////////////////////////// 896//EndStub//////////////////////////////////////////////////////////////////////
@@ -982,6 +1027,7 @@ ia64_set_kernel_registers:
982 add temp4=temp4, temp1 // &struct ia64_sal_os_state.os_gp 1027 add temp4=temp4, temp1 // &struct ia64_sal_os_state.os_gp
983 add r12=temp1, temp3 // kernel stack pointer on MCA/INIT stack 1028 add r12=temp1, temp3 // kernel stack pointer on MCA/INIT stack
984 add r13=temp1, r3 // set current to start of MCA/INIT stack 1029 add r13=temp1, r3 // set current to start of MCA/INIT stack
1030 add r20=temp1, r3 // physical start of MCA/INIT stack
985 ;; 1031 ;;
986 ld8 r1=[temp4] // OS GP from SAL OS state 1032 ld8 r1=[temp4] // OS GP from SAL OS state
987 ;; 1033 ;;
@@ -991,7 +1037,35 @@ ia64_set_kernel_registers:
991 ;; 1037 ;;
992 mov IA64_KR(CURRENT)=r13 1038 mov IA64_KR(CURRENT)=r13
993 1039
994 // FIXME: do I need to wire IA64_KR_CURRENT_STACK and IA64_TR_CURRENT_STACK? 1040 /* Wire IA64_TR_CURRENT_STACK to the MCA/INIT handler stack. To avoid
1041 * any dependencies on the algorithm in ia64_switch_to(), just purge
1042 * any existing CURRENT_STACK mapping and insert the new one.
1043 */
1044
1045 mov r16=IA64_KR(CURRENT_STACK) // physical granule mapped by IA64_TR_CURRENT_STACK
1046 ;;
1047 shl r16=r16,IA64_GRANULE_SHIFT
1048 ;;
1049 dep r16=-1,r16,61,3 // virtual granule
1050 mov r18=IA64_GRANULE_SHIFT<<2 // for cr.itir.ps
1051 ;;
1052 ptr.d r16,r18
1053 ;;
1054 srlz.d
1055
1056 shr.u r16=r20,IA64_GRANULE_SHIFT // r20 = physical start of MCA/INIT stack
1057 movl r21=PAGE_KERNEL // page properties
1058 ;;
1059 mov IA64_KR(CURRENT_STACK)=r16
1060 or r21=r20,r21 // construct PA | page properties
1061 ;;
1062 mov cr.itir=r18
1063 mov cr.ifa=r13
1064 mov r20=IA64_TR_CURRENT_STACK
1065 ;;
1066 itr.d dtr[r20]=r21
1067 ;;
1068 srlz.d
995 1069
996 br.sptk b0 1070 br.sptk b0
997 1071
diff --git a/arch/ia64/kernel/mca_drv.c b/arch/ia64/kernel/mca_drv.c
index 80f83d6cdbfc..f081c60ab206 100644
--- a/arch/ia64/kernel/mca_drv.c
+++ b/arch/ia64/kernel/mca_drv.c
@@ -56,8 +56,9 @@ static struct page *page_isolate[MAX_PAGE_ISOLATE];
56static int num_page_isolate = 0; 56static int num_page_isolate = 0;
57 57
58typedef enum { 58typedef enum {
59 ISOLATE_NG = 0, 59 ISOLATE_NG,
60 ISOLATE_OK = 1 60 ISOLATE_OK,
61 ISOLATE_NONE
61} isolate_status_t; 62} isolate_status_t;
62 63
63/* 64/*
@@ -74,7 +75,7 @@ static struct {
74 * @paddr: poisoned memory location 75 * @paddr: poisoned memory location
75 * 76 *
76 * Return value: 77 * Return value:
77 * ISOLATE_OK / ISOLATE_NG 78 * one of isolate_status_t, ISOLATE_OK/NG/NONE.
78 */ 79 */
79 80
80static isolate_status_t 81static isolate_status_t
@@ -85,7 +86,10 @@ mca_page_isolate(unsigned long paddr)
85 86
86 /* whether physical address is valid or not */ 87 /* whether physical address is valid or not */
87 if (!ia64_phys_addr_valid(paddr)) 88 if (!ia64_phys_addr_valid(paddr))
88 return ISOLATE_NG; 89 return ISOLATE_NONE;
90
91 if (!pfn_valid(paddr))
92 return ISOLATE_NONE;
89 93
90 /* convert physical address to physical page number */ 94 /* convert physical address to physical page number */
91 p = pfn_to_page(paddr>>PAGE_SHIFT); 95 p = pfn_to_page(paddr>>PAGE_SHIFT);
@@ -122,10 +126,15 @@ mca_handler_bh(unsigned long paddr)
122 current->pid, current->comm); 126 current->pid, current->comm);
123 127
124 spin_lock(&mca_bh_lock); 128 spin_lock(&mca_bh_lock);
125 if (mca_page_isolate(paddr) == ISOLATE_OK) { 129 switch (mca_page_isolate(paddr)) {
130 case ISOLATE_OK:
126 printk(KERN_DEBUG "Page isolation: ( %lx ) success.\n", paddr); 131 printk(KERN_DEBUG "Page isolation: ( %lx ) success.\n", paddr);
127 } else { 132 break;
133 case ISOLATE_NG:
128 printk(KERN_DEBUG "Page isolation: ( %lx ) failure.\n", paddr); 134 printk(KERN_DEBUG "Page isolation: ( %lx ) failure.\n", paddr);
135 break;
136 default:
137 break;
129 } 138 }
130 spin_unlock(&mca_bh_lock); 139 spin_unlock(&mca_bh_lock);
131 140
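
The mca_drv change above turns page isolation from a yes/no answer into a three-way status, with ISOLATE_NONE meaning there was nothing to isolate and therefore nothing to log. A small stand-alone sketch of that pattern (the address checks are placeholders, not the real validity tests):

#include <stdio.h>

/* Three-way isolation status: the operation can succeed, fail, or simply
 * not apply because there was no page to act on. */
typedef enum {
    ISOLATE_NG,
    ISOLATE_OK,
    ISOLATE_NONE
} isolate_status_t;

static isolate_status_t page_isolate(unsigned long paddr)
{
    if (paddr & 1)                  /* stand-in for "not a managed page" */
        return ISOLATE_NONE;
    return (paddr & 2) ? ISOLATE_OK : ISOLATE_NG;
}

static void report(unsigned long paddr)
{
    switch (page_isolate(paddr)) {
    case ISOLATE_OK:
        printf("Page isolation: ( %lx ) success.\n", paddr);
        break;
    case ISOLATE_NG:
        printf("Page isolation: ( %lx ) failure.\n", paddr);
        break;
    default:                        /* ISOLATE_NONE: nothing to report */
        break;
    }
}

int main(void)
{
    report(0x1002);                 /* success */
    report(0x1000);                 /* failure */
    report(0x1001);                 /* no page to isolate, stays silent */
    return 0;
}
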
diff --git a/arch/m32r/Makefile b/arch/m32r/Makefile
index dd4418d846e9..983d438b14b6 100644
--- a/arch/m32r/Makefile
+++ b/arch/m32r/Makefile
@@ -24,7 +24,7 @@ aflags-$(CONFIG_ISA_M32R) += -DNO_FPU -Wa,-no-bitinst
24CFLAGS += $(cflags-y) 24CFLAGS += $(cflags-y)
25AFLAGS += $(aflags-y) 25AFLAGS += $(aflags-y)
26 26
27CHECKFLAGS := $(CHECK) -D__m32r__ 27CHECKFLAGS += -D__m32r__ -D__BIG_ENDIAN__=1
28 28
29head-y := arch/m32r/kernel/head.o arch/m32r/kernel/init_task.o 29head-y := arch/m32r/kernel/head.o arch/m32r/kernel/init_task.o
30 30
diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
index 6c6855f1aa05..ce16bbe26a52 100644
--- a/arch/m32r/lib/usercopy.c
+++ b/arch/m32r/lib/usercopy.c
@@ -13,7 +13,7 @@
13#include <asm/uaccess.h> 13#include <asm/uaccess.h>
14 14
15unsigned long 15unsigned long
16__generic_copy_to_user(void *to, const void *from, unsigned long n) 16__generic_copy_to_user(void __user *to, const void *from, unsigned long n)
17{ 17{
18 prefetch(from); 18 prefetch(from);
19 if (access_ok(VERIFY_WRITE, to, n)) 19 if (access_ok(VERIFY_WRITE, to, n))
@@ -22,7 +22,7 @@ __generic_copy_to_user(void *to, const void *from, unsigned long n)
22} 22}
23 23
24unsigned long 24unsigned long
25__generic_copy_from_user(void *to, const void *from, unsigned long n) 25__generic_copy_from_user(void *to, const void __user *from, unsigned long n)
26{ 26{
27 prefetchw(to); 27 prefetchw(to);
28 if (access_ok(VERIFY_READ, from, n)) 28 if (access_ok(VERIFY_READ, from, n))
@@ -111,7 +111,7 @@ do { \
111#endif /* CONFIG_ISA_DUAL_ISSUE */ 111#endif /* CONFIG_ISA_DUAL_ISSUE */
112 112
113long 113long
114__strncpy_from_user(char *dst, const char *src, long count) 114__strncpy_from_user(char *dst, const char __user *src, long count)
115{ 115{
116 long res; 116 long res;
117 __do_strncpy_from_user(dst, src, count, res); 117 __do_strncpy_from_user(dst, src, count, res);
@@ -119,7 +119,7 @@ __strncpy_from_user(char *dst, const char *src, long count)
119} 119}
120 120
121long 121long
122strncpy_from_user(char *dst, const char *src, long count) 122strncpy_from_user(char *dst, const char __user *src, long count)
123{ 123{
124 long res = -EFAULT; 124 long res = -EFAULT;
125 if (access_ok(VERIFY_READ, src, 1)) 125 if (access_ok(VERIFY_READ, src, 1))
@@ -222,7 +222,7 @@ do { \
222#endif /* not CONFIG_ISA_DUAL_ISSUE */ 222#endif /* not CONFIG_ISA_DUAL_ISSUE */
223 223
224unsigned long 224unsigned long
225clear_user(void *to, unsigned long n) 225clear_user(void __user *to, unsigned long n)
226{ 226{
227 if (access_ok(VERIFY_WRITE, to, n)) 227 if (access_ok(VERIFY_WRITE, to, n))
228 __do_clear_user(to, n); 228 __do_clear_user(to, n);
@@ -230,7 +230,7 @@ clear_user(void *to, unsigned long n)
230} 230}
231 231
232unsigned long 232unsigned long
233__clear_user(void *to, unsigned long n) 233__clear_user(void __user *to, unsigned long n)
234{ 234{
235 __do_clear_user(to, n); 235 __do_clear_user(to, n);
236 return n; 236 return n;
@@ -244,7 +244,7 @@ __clear_user(void *to, unsigned long n)
244 244
245#ifdef CONFIG_ISA_DUAL_ISSUE 245#ifdef CONFIG_ISA_DUAL_ISSUE
246 246
247long strnlen_user(const char *s, long n) 247long strnlen_user(const char __user *s, long n)
248{ 248{
249 unsigned long mask = -__addr_ok(s); 249 unsigned long mask = -__addr_ok(s);
250 unsigned long res; 250 unsigned long res;
@@ -313,7 +313,7 @@ long strnlen_user(const char *s, long n)
313 313
314#else /* not CONFIG_ISA_DUAL_ISSUE */ 314#else /* not CONFIG_ISA_DUAL_ISSUE */
315 315
316long strnlen_user(const char *s, long n) 316long strnlen_user(const char __user *s, long n)
317{ 317{
318 unsigned long mask = -__addr_ok(s); 318 unsigned long mask = -__addr_ok(s);
319 unsigned long res; 319 unsigned long res;
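
The m32r usercopy change above only adds __user annotations, which matter to sparse rather than to the generated code. A hedged sketch of how such an annotation is typically wired up and used; the helper name is made up, and the attribute definition mirrors the usual kernel approach but should be treated as illustrative:

#include <stdio.h>

/* Under sparse (__CHECKER__) __user marks pointers living in the user
 * address space; in a normal build it expands to nothing. */
#ifdef __CHECKER__
# define __user __attribute__((noderef, address_space(1)))
#else
# define __user
#endif

/* Hypothetical copy routine: the prototype is what matters, since sparse
 * checks that only user-space pointers are passed as the __user argument. */
static unsigned long copy_to_user_sketch(void __user *to, const void *from,
                                         unsigned long n)
{
    (void)to;
    (void)from;
    return n;                       /* pretend nothing could be copied */
}

int main(void)
{
    char kernel_buf[8] = "kernel";
    char user_buf[8];
    unsigned long left;

    /* user_buf plays the role of a user-space pointer in this toy;
     * passing kernel_buf as 'to' is the mixup sparse would flag. */
    left = copy_to_user_sketch((void __user *)user_buf, kernel_buf,
                               sizeof(kernel_buf));
    printf("%lu bytes left uncopied\n", left);
    return 0;
}
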
diff --git a/arch/ppc/boot/ld.script b/arch/ppc/boot/ld.script
index 9362193742ac..d4dd8f15395e 100644
--- a/arch/ppc/boot/ld.script
+++ b/arch/ppc/boot/ld.script
@@ -1,4 +1,4 @@
1OUTPUT_ARCH(powerpc) 1OUTPUT_ARCH(powerpc:common)
2SECTIONS 2SECTIONS
3{ 3{
4 /* Read-only sections, merged into text segment: */ 4 /* Read-only sections, merged into text segment: */
diff --git a/arch/ppc/kernel/Makefile b/arch/ppc/kernel/Makefile
index 1fb92f16acd6..b1457a8a9c0f 100644
--- a/arch/ppc/kernel/Makefile
+++ b/arch/ppc/kernel/Makefile
@@ -15,9 +15,8 @@ extra-y += vmlinux.lds
15obj-y := entry.o traps.o irq.o idle.o time.o misc.o \ 15obj-y := entry.o traps.o irq.o idle.o time.o misc.o \
16 process.o signal.o ptrace.o align.o \ 16 process.o signal.o ptrace.o align.o \
17 semaphore.o syscalls.o setup.o \ 17 semaphore.o syscalls.o setup.o \
18 cputable.o ppc_htab.o 18 cputable.o ppc_htab.o perfmon.o
19obj-$(CONFIG_6xx) += l2cr.o cpu_setup_6xx.o 19obj-$(CONFIG_6xx) += l2cr.o cpu_setup_6xx.o
20obj-$(CONFIG_E500) += perfmon.o
21obj-$(CONFIG_SOFTWARE_SUSPEND) += swsusp.o 20obj-$(CONFIG_SOFTWARE_SUSPEND) += swsusp.o
22obj-$(CONFIG_POWER4) += cpu_setup_power4.o 21obj-$(CONFIG_POWER4) += cpu_setup_power4.o
23obj-$(CONFIG_MODULES) += module.o ppc_ksyms.o 22obj-$(CONFIG_MODULES) += module.o ppc_ksyms.o
diff --git a/arch/ppc/kernel/perfmon.c b/arch/ppc/kernel/perfmon.c
index fa1dad96b830..22df9a596a0f 100644
--- a/arch/ppc/kernel/perfmon.c
+++ b/arch/ppc/kernel/perfmon.c
@@ -45,9 +45,8 @@ static void dummy_perf(struct pt_regs *regs)
45 mtpmr(PMRN_PMGC0, pmgc0); 45 mtpmr(PMRN_PMGC0, pmgc0);
46} 46}
47 47
48#else 48#elif defined(CONFIG_6xx)
49/* Ensure exceptions are disabled */ 49/* Ensure exceptions are disabled */
50
51static void dummy_perf(struct pt_regs *regs) 50static void dummy_perf(struct pt_regs *regs)
52{ 51{
53 unsigned int mmcr0 = mfspr(SPRN_MMCR0); 52 unsigned int mmcr0 = mfspr(SPRN_MMCR0);
@@ -55,6 +54,10 @@ static void dummy_perf(struct pt_regs *regs)
55 mmcr0 &= ~MMCR0_PMXE; 54 mmcr0 &= ~MMCR0_PMXE;
56 mtspr(SPRN_MMCR0, mmcr0); 55 mtspr(SPRN_MMCR0, mmcr0);
57} 56}
57#else
58static void dummy_perf(struct pt_regs *regs)
59{
60}
58#endif 61#endif
59 62
60void (*perf_irq)(struct pt_regs *) = dummy_perf; 63void (*perf_irq)(struct pt_regs *) = dummy_perf;
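
The perfmon change above reshapes the preprocessor conditionals so the 6xx handler is only built for CONFIG_6xx and every other configuration still gets an empty dummy_perf for perf_irq to point at. A minimal sketch of that #if/#elif/#else shape (the CONFIG_* names here are placeholders, not the real kernel symbols):

#include <stdio.h>

/* Each supported configuration provides its own handler; the final #else
 * supplies an empty stub so the function pointer always has a valid target. */
#if defined(CONFIG_HAS_BOOKE_PMU)
static void dummy_perf(void)
{
    puts("Book-E style: freeze counters via PMGC0");
}
#elif defined(CONFIG_HAS_CLASSIC_PMU)
static void dummy_perf(void)
{
    puts("6xx style: clear MMCR0_PMXE");
}
#else
static void dummy_perf(void)
{
    /* no performance monitor on this configuration: do nothing */
}
#endif

static void (*perf_irq)(void) = dummy_perf;

int main(void)
{
    perf_irq();
    return 0;
}
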
diff --git a/arch/ppc/platforms/4xx/bamboo.c b/arch/ppc/platforms/4xx/bamboo.c
index ac391d463d78..78a403b48dba 100644
--- a/arch/ppc/platforms/4xx/bamboo.c
+++ b/arch/ppc/platforms/4xx/bamboo.c
@@ -27,7 +27,6 @@
27#include <linux/delay.h> 27#include <linux/delay.h>
28#include <linux/ide.h> 28#include <linux/ide.h>
29#include <linux/initrd.h> 29#include <linux/initrd.h>
30#include <linux/irq.h>
31#include <linux/seq_file.h> 30#include <linux/seq_file.h>
32#include <linux/root_dev.h> 31#include <linux/root_dev.h>
33#include <linux/tty.h> 32#include <linux/tty.h>
diff --git a/arch/ppc/platforms/4xx/ebony.c b/arch/ppc/platforms/4xx/ebony.c
index d6b2b1965dcb..27b778ab903b 100644
--- a/arch/ppc/platforms/4xx/ebony.c
+++ b/arch/ppc/platforms/4xx/ebony.c
@@ -30,7 +30,6 @@
30#include <linux/delay.h> 30#include <linux/delay.h>
31#include <linux/ide.h> 31#include <linux/ide.h>
32#include <linux/initrd.h> 32#include <linux/initrd.h>
33#include <linux/irq.h>
34#include <linux/seq_file.h> 33#include <linux/seq_file.h>
35#include <linux/root_dev.h> 34#include <linux/root_dev.h>
36#include <linux/tty.h> 35#include <linux/tty.h>
diff --git a/arch/ppc/platforms/4xx/luan.c b/arch/ppc/platforms/4xx/luan.c
index a38e6f9ef858..16d953bda22c 100644
--- a/arch/ppc/platforms/4xx/luan.c
+++ b/arch/ppc/platforms/4xx/luan.c
@@ -28,7 +28,6 @@
28#include <linux/delay.h> 28#include <linux/delay.h>
29#include <linux/ide.h> 29#include <linux/ide.h>
30#include <linux/initrd.h> 30#include <linux/initrd.h>
31#include <linux/irq.h>
32#include <linux/seq_file.h> 31#include <linux/seq_file.h>
33#include <linux/root_dev.h> 32#include <linux/root_dev.h>
34#include <linux/tty.h> 33#include <linux/tty.h>
diff --git a/arch/ppc/platforms/4xx/ocotea.c b/arch/ppc/platforms/4xx/ocotea.c
index 80028df1b445..506949c5dd29 100644
--- a/arch/ppc/platforms/4xx/ocotea.c
+++ b/arch/ppc/platforms/4xx/ocotea.c
@@ -28,7 +28,6 @@
28#include <linux/delay.h> 28#include <linux/delay.h>
29#include <linux/ide.h> 29#include <linux/ide.h>
30#include <linux/initrd.h> 30#include <linux/initrd.h>
31#include <linux/irq.h>
32#include <linux/seq_file.h> 31#include <linux/seq_file.h>
33#include <linux/root_dev.h> 32#include <linux/root_dev.h>
34#include <linux/tty.h> 33#include <linux/tty.h>
diff --git a/arch/ppc/platforms/83xx/mpc834x_sys.c b/arch/ppc/platforms/83xx/mpc834x_sys.c
index b38a851a64ec..79b3f533d0a3 100644
--- a/arch/ppc/platforms/83xx/mpc834x_sys.c
+++ b/arch/ppc/platforms/83xx/mpc834x_sys.c
@@ -24,7 +24,6 @@
24#include <linux/major.h> 24#include <linux/major.h>
25#include <linux/console.h> 25#include <linux/console.h>
26#include <linux/delay.h> 26#include <linux/delay.h>
27#include <linux/irq.h>
28#include <linux/seq_file.h> 27#include <linux/seq_file.h>
29#include <linux/root_dev.h> 28#include <linux/root_dev.h>
30#include <linux/serial.h> 29#include <linux/serial.h>
diff --git a/arch/ppc/platforms/85xx/mpc8540_ads.c b/arch/ppc/platforms/85xx/mpc8540_ads.c
index f761fdf160db..7dc8a68acfd0 100644
--- a/arch/ppc/platforms/85xx/mpc8540_ads.c
+++ b/arch/ppc/platforms/85xx/mpc8540_ads.c
@@ -24,7 +24,6 @@
24#include <linux/major.h> 24#include <linux/major.h>
25#include <linux/console.h> 25#include <linux/console.h>
26#include <linux/delay.h> 26#include <linux/delay.h>
27#include <linux/irq.h>
28#include <linux/seq_file.h> 27#include <linux/seq_file.h>
29#include <linux/root_dev.h> 28#include <linux/root_dev.h>
30#include <linux/serial.h> 29#include <linux/serial.h>
diff --git a/arch/ppc/platforms/85xx/mpc8560_ads.c b/arch/ppc/platforms/85xx/mpc8560_ads.c
index f2748c88665a..8841fd7da6ee 100644
--- a/arch/ppc/platforms/85xx/mpc8560_ads.c
+++ b/arch/ppc/platforms/85xx/mpc8560_ads.c
@@ -24,7 +24,6 @@
24#include <linux/major.h> 24#include <linux/major.h>
25#include <linux/console.h> 25#include <linux/console.h>
26#include <linux/delay.h> 26#include <linux/delay.h>
27#include <linux/irq.h>
28#include <linux/seq_file.h> 27#include <linux/seq_file.h>
29#include <linux/root_dev.h> 28#include <linux/root_dev.h>
30#include <linux/serial.h> 29#include <linux/serial.h>
diff --git a/arch/ppc/platforms/85xx/mpc85xx_ads_common.c b/arch/ppc/platforms/85xx/mpc85xx_ads_common.c
index 18e952d1767c..bd3ac0136756 100644
--- a/arch/ppc/platforms/85xx/mpc85xx_ads_common.c
+++ b/arch/ppc/platforms/85xx/mpc85xx_ads_common.c
@@ -24,7 +24,6 @@
24#include <linux/major.h> 24#include <linux/major.h>
25#include <linux/console.h> 25#include <linux/console.h>
26#include <linux/delay.h> 26#include <linux/delay.h>
27#include <linux/irq.h>
28#include <linux/seq_file.h> 27#include <linux/seq_file.h>
29#include <linux/serial.h> 28#include <linux/serial.h>
30#include <linux/module.h> 29#include <linux/module.h>
diff --git a/arch/ppc/platforms/85xx/mpc85xx_cds_common.c b/arch/ppc/platforms/85xx/mpc85xx_cds_common.c
index 6267b294f704..9f9039498ae5 100644
--- a/arch/ppc/platforms/85xx/mpc85xx_cds_common.c
+++ b/arch/ppc/platforms/85xx/mpc85xx_cds_common.c
@@ -24,7 +24,6 @@
24#include <linux/major.h> 24#include <linux/major.h>
25#include <linux/console.h> 25#include <linux/console.h>
26#include <linux/delay.h> 26#include <linux/delay.h>
27#include <linux/irq.h>
28#include <linux/seq_file.h> 27#include <linux/seq_file.h>
29#include <linux/serial.h> 28#include <linux/serial.h>
30#include <linux/module.h> 29#include <linux/module.h>
diff --git a/arch/ppc/platforms/85xx/sbc8560.c b/arch/ppc/platforms/85xx/sbc8560.c
index 165df94d4aa6..c76760a781c1 100644
--- a/arch/ppc/platforms/85xx/sbc8560.c
+++ b/arch/ppc/platforms/85xx/sbc8560.c
@@ -24,7 +24,6 @@
24#include <linux/major.h> 24#include <linux/major.h>
25#include <linux/console.h> 25#include <linux/console.h>
26#include <linux/delay.h> 26#include <linux/delay.h>
27#include <linux/irq.h>
28#include <linux/seq_file.h> 27#include <linux/seq_file.h>
29#include <linux/root_dev.h> 28#include <linux/root_dev.h>
30#include <linux/serial.h> 29#include <linux/serial.h>
diff --git a/arch/ppc/platforms/85xx/sbc85xx.c b/arch/ppc/platforms/85xx/sbc85xx.c
index 4f6d1ddd6fb8..c02f110219f5 100644
--- a/arch/ppc/platforms/85xx/sbc85xx.c
+++ b/arch/ppc/platforms/85xx/sbc85xx.c
@@ -23,7 +23,6 @@
23#include <linux/major.h> 23#include <linux/major.h>
24#include <linux/console.h> 24#include <linux/console.h>
25#include <linux/delay.h> 25#include <linux/delay.h>
26#include <linux/irq.h>
27#include <linux/seq_file.h> 26#include <linux/seq_file.h>
28#include <linux/serial.h> 27#include <linux/serial.h>
29#include <linux/module.h> 28#include <linux/module.h>
diff --git a/arch/ppc/platforms/85xx/stx_gp3.c b/arch/ppc/platforms/85xx/stx_gp3.c
index c99b365d6110..20940f4044f4 100644
--- a/arch/ppc/platforms/85xx/stx_gp3.c
+++ b/arch/ppc/platforms/85xx/stx_gp3.c
@@ -30,7 +30,6 @@
30#include <linux/blkdev.h> 30#include <linux/blkdev.h>
31#include <linux/console.h> 31#include <linux/console.h>
32#include <linux/delay.h> 32#include <linux/delay.h>
33#include <linux/irq.h>
34#include <linux/root_dev.h> 33#include <linux/root_dev.h>
35#include <linux/seq_file.h> 34#include <linux/seq_file.h>
36#include <linux/serial.h> 35#include <linux/serial.h>
diff --git a/arch/ppc/platforms/chestnut.c b/arch/ppc/platforms/chestnut.c
index 7786818bd9d0..df6ff98c023a 100644
--- a/arch/ppc/platforms/chestnut.c
+++ b/arch/ppc/platforms/chestnut.c
@@ -35,7 +35,6 @@
35#include <asm/time.h> 35#include <asm/time.h>
36#include <asm/dma.h> 36#include <asm/dma.h>
37#include <asm/io.h> 37#include <asm/io.h>
38#include <linux/irq.h>
39#include <asm/hw_irq.h> 38#include <asm/hw_irq.h>
40#include <asm/machdep.h> 39#include <asm/machdep.h>
41#include <asm/kgdb.h> 40#include <asm/kgdb.h>
diff --git a/arch/ppc/platforms/chrp_setup.c b/arch/ppc/platforms/chrp_setup.c
index 57f29ab29bda..66346f0de7ec 100644
--- a/arch/ppc/platforms/chrp_setup.c
+++ b/arch/ppc/platforms/chrp_setup.c
@@ -32,7 +32,6 @@
32#include <linux/module.h> 32#include <linux/module.h>
33#include <linux/delay.h> 33#include <linux/delay.h>
34#include <linux/ide.h> 34#include <linux/ide.h>
35#include <linux/irq.h>
36#include <linux/console.h> 35#include <linux/console.h>
37#include <linux/seq_file.h> 36#include <linux/seq_file.h>
38#include <linux/root_dev.h> 37#include <linux/root_dev.h>
diff --git a/arch/ppc/platforms/gemini_setup.c b/arch/ppc/platforms/gemini_setup.c
index e391e52383c7..3a5ff9fb71d6 100644
--- a/arch/ppc/platforms/gemini_setup.c
+++ b/arch/ppc/platforms/gemini_setup.c
@@ -21,7 +21,6 @@
21#include <linux/major.h> 21#include <linux/major.h>
22#include <linux/initrd.h> 22#include <linux/initrd.h>
23#include <linux/console.h> 23#include <linux/console.h>
24#include <linux/irq.h>
25#include <linux/seq_file.h> 24#include <linux/seq_file.h>
26#include <linux/root_dev.h> 25#include <linux/root_dev.h>
27#include <linux/bcd.h> 26#include <linux/bcd.h>
diff --git a/arch/ppc/platforms/mvme5100.c b/arch/ppc/platforms/mvme5100.c
index b292b44b760c..ce2ce88c8033 100644
--- a/arch/ppc/platforms/mvme5100.c
+++ b/arch/ppc/platforms/mvme5100.c
@@ -20,7 +20,6 @@
20#include <linux/initrd.h> 20#include <linux/initrd.h>
21#include <linux/console.h> 21#include <linux/console.h>
22#include <linux/delay.h> 22#include <linux/delay.h>
23#include <linux/irq.h>
24#include <linux/ide.h> 23#include <linux/ide.h>
25#include <linux/seq_file.h> 24#include <linux/seq_file.h>
26#include <linux/kdev_t.h> 25#include <linux/kdev_t.h>
diff --git a/arch/ppc/platforms/pmac_cpufreq.c b/arch/ppc/platforms/pmac_cpufreq.c
index c0605244edda..d4bc5f67ec53 100644
--- a/arch/ppc/platforms/pmac_cpufreq.c
+++ b/arch/ppc/platforms/pmac_cpufreq.c
@@ -695,6 +695,13 @@ static int __init pmac_cpufreq_setup(void)
695 set_speed_proc = pmu_set_cpu_speed; 695 set_speed_proc = pmu_set_cpu_speed;
696 is_pmu_based = 1; 696 is_pmu_based = 1;
697 } 697 }
698 /* Else check for TiPb 550 */
699 else if (machine_is_compatible("PowerBook3,3") && cur_freq == 550000) {
700 hi_freq = cur_freq;
701 low_freq = 500000;
702 set_speed_proc = pmu_set_cpu_speed;
703 is_pmu_based = 1;
704 }
698 /* Else check for TiPb 400 & 500 */ 705 /* Else check for TiPb 400 & 500 */
699 else if (machine_is_compatible("PowerBook3,2")) { 706 else if (machine_is_compatible("PowerBook3,2")) {
700 /* We only know about the 400 MHz and the 500Mhz model 707 /* We only know about the 400 MHz and the 500Mhz model
diff --git a/arch/ppc/platforms/pmac_feature.c b/arch/ppc/platforms/pmac_feature.c
index 867336ad5d36..dd6d45ae0501 100644
--- a/arch/ppc/platforms/pmac_feature.c
+++ b/arch/ppc/platforms/pmac_feature.c
@@ -2337,6 +2337,10 @@ static struct pmac_mb_def pmac_mb_defs[] __pmacdata = {
2337 PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features, 2337 PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features,
2338 PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE, 2338 PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE,
2339 }, 2339 },
2340 { "PowerBook6,7", "iBook G4",
2341 PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features,
2342 PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE,
2343 },
2340 { "PowerBook6,8", "PowerBook G4 12\"", 2344 { "PowerBook6,8", "PowerBook G4 12\"",
2341 PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features, 2345 PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features,
2342 PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE, 2346 PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE,
diff --git a/arch/ppc/platforms/pmac_setup.c b/arch/ppc/platforms/pmac_setup.c
index b392b9a15987..d6356f480d90 100644
--- a/arch/ppc/platforms/pmac_setup.c
+++ b/arch/ppc/platforms/pmac_setup.c
@@ -48,7 +48,6 @@
48#include <linux/adb.h> 48#include <linux/adb.h>
49#include <linux/cuda.h> 49#include <linux/cuda.h>
50#include <linux/pmu.h> 50#include <linux/pmu.h>
51#include <linux/irq.h>
52#include <linux/seq_file.h> 51#include <linux/seq_file.h>
53#include <linux/root_dev.h> 52#include <linux/root_dev.h>
54#include <linux/bitops.h> 53#include <linux/bitops.h>
@@ -719,7 +718,8 @@ pmac_declare_of_platform_devices(void)
719 if (np) { 718 if (np) {
720 for (np = np->child; np != NULL; np = np->sibling) 719 for (np = np->child; np != NULL; np = np->sibling)
721 if (strncmp(np->name, "i2c", 3) == 0) { 720 if (strncmp(np->name, "i2c", 3) == 0) {
722 of_platform_device_create(np, "uni-n-i2c"); 721 of_platform_device_create(np, "uni-n-i2c",
722 NULL);
723 break; 723 break;
724 } 724 }
725 } 725 }
@@ -727,17 +727,18 @@ pmac_declare_of_platform_devices(void)
727 if (np) { 727 if (np) {
728 for (np = np->child; np != NULL; np = np->sibling) 728 for (np = np->child; np != NULL; np = np->sibling)
729 if (strncmp(np->name, "i2c", 3) == 0) { 729 if (strncmp(np->name, "i2c", 3) == 0) {
730 of_platform_device_create(np, "u3-i2c"); 730 of_platform_device_create(np, "u3-i2c",
731 NULL);
731 break; 732 break;
732 } 733 }
733 } 734 }
734 735
735 np = find_devices("valkyrie"); 736 np = find_devices("valkyrie");
736 if (np) 737 if (np)
737 of_platform_device_create(np, "valkyrie"); 738 of_platform_device_create(np, "valkyrie", NULL);
738 np = find_devices("platinum"); 739 np = find_devices("platinum");
739 if (np) 740 if (np)
740 of_platform_device_create(np, "platinum"); 741 of_platform_device_create(np, "platinum", NULL);
741 742
742 return 0; 743 return 0;
743} 744}
diff --git a/arch/ppc/platforms/powerpmc250.c b/arch/ppc/platforms/powerpmc250.c
index 0abe15159e6c..e6b520e6e13f 100644
--- a/arch/ppc/platforms/powerpmc250.c
+++ b/arch/ppc/platforms/powerpmc250.c
@@ -26,7 +26,6 @@
26#include <linux/initrd.h> 26#include <linux/initrd.h>
27#include <linux/console.h> 27#include <linux/console.h>
28#include <linux/delay.h> 28#include <linux/delay.h>
29#include <linux/irq.h>
30#include <linux/slab.h> 29#include <linux/slab.h>
31#include <linux/seq_file.h> 30#include <linux/seq_file.h>
32#include <linux/ide.h> 31#include <linux/ide.h>
diff --git a/arch/ppc/platforms/pplus.c b/arch/ppc/platforms/pplus.c
index 65705c911795..e70aae20d6f9 100644
--- a/arch/ppc/platforms/pplus.c
+++ b/arch/ppc/platforms/pplus.c
@@ -22,7 +22,6 @@
22#include <linux/ioport.h> 22#include <linux/ioport.h>
23#include <linux/console.h> 23#include <linux/console.h>
24#include <linux/pci.h> 24#include <linux/pci.h>
25#include <linux/irq.h>
26#include <linux/ide.h> 25#include <linux/ide.h>
27#include <linux/seq_file.h> 26#include <linux/seq_file.h>
28#include <linux/root_dev.h> 27#include <linux/root_dev.h>
diff --git a/arch/ppc/platforms/prpmc750.c b/arch/ppc/platforms/prpmc750.c
index 24ae1caafc61..0bb14a5e824c 100644
--- a/arch/ppc/platforms/prpmc750.c
+++ b/arch/ppc/platforms/prpmc750.c
@@ -24,7 +24,6 @@
24#include <linux/initrd.h> 24#include <linux/initrd.h>
25#include <linux/console.h> 25#include <linux/console.h>
26#include <linux/delay.h> 26#include <linux/delay.h>
27#include <linux/irq.h>
28#include <linux/seq_file.h> 27#include <linux/seq_file.h>
29#include <linux/ide.h> 28#include <linux/ide.h>
30#include <linux/root_dev.h> 29#include <linux/root_dev.h>
diff --git a/arch/ppc/platforms/prpmc800.c b/arch/ppc/platforms/prpmc800.c
index 8b09fa69b35b..de7baefacd3a 100644
--- a/arch/ppc/platforms/prpmc800.c
+++ b/arch/ppc/platforms/prpmc800.c
@@ -22,7 +22,6 @@
22#include <linux/initrd.h> 22#include <linux/initrd.h>
23#include <linux/console.h> 23#include <linux/console.h>
24#include <linux/delay.h> 24#include <linux/delay.h>
25#include <linux/irq.h>
26#include <linux/seq_file.h> 25#include <linux/seq_file.h>
27#include <linux/ide.h> 26#include <linux/ide.h>
28#include <linux/root_dev.h> 27#include <linux/root_dev.h>
diff --git a/arch/ppc/platforms/radstone_ppc7d.c b/arch/ppc/platforms/radstone_ppc7d.c
index c30607a972d8..0376c8cff5d1 100644
--- a/arch/ppc/platforms/radstone_ppc7d.c
+++ b/arch/ppc/platforms/radstone_ppc7d.c
@@ -32,7 +32,6 @@
32#include <linux/initrd.h> 32#include <linux/initrd.h>
33#include <linux/console.h> 33#include <linux/console.h>
34#include <linux/delay.h> 34#include <linux/delay.h>
35#include <linux/irq.h>
36#include <linux/ide.h> 35#include <linux/ide.h>
37#include <linux/seq_file.h> 36#include <linux/seq_file.h>
38#include <linux/root_dev.h> 37#include <linux/root_dev.h>
@@ -59,7 +58,6 @@
59#include <asm/mpc10x.h> 58#include <asm/mpc10x.h>
60#include <asm/pci-bridge.h> 59#include <asm/pci-bridge.h>
61#include <asm/mv64x60.h> 60#include <asm/mv64x60.h>
62#include <asm/i8259.h>
63 61
64#include "radstone_ppc7d.h" 62#include "radstone_ppc7d.h"
65 63
diff --git a/arch/ppc/platforms/sandpoint.c b/arch/ppc/platforms/sandpoint.c
index 21e31346b12b..5232283c1974 100644
--- a/arch/ppc/platforms/sandpoint.c
+++ b/arch/ppc/platforms/sandpoint.c
@@ -74,7 +74,6 @@
74#include <linux/initrd.h> 74#include <linux/initrd.h>
75#include <linux/console.h> 75#include <linux/console.h>
76#include <linux/delay.h> 76#include <linux/delay.h>
77#include <linux/irq.h>
78#include <linux/ide.h> 77#include <linux/ide.h>
79#include <linux/seq_file.h> 78#include <linux/seq_file.h>
80#include <linux/root_dev.h> 79#include <linux/root_dev.h>
diff --git a/arch/ppc/syslib/mv64x60.c b/arch/ppc/syslib/mv64x60.c
index 839f8872826f..4849850a59ed 100644
--- a/arch/ppc/syslib/mv64x60.c
+++ b/arch/ppc/syslib/mv64x60.c
@@ -34,7 +34,7 @@ u8 mv64x60_pci_exclude_bridge = 1;
34DEFINE_SPINLOCK(mv64x60_lock); 34DEFINE_SPINLOCK(mv64x60_lock);
35 35
36static phys_addr_t mv64x60_bridge_pbase; 36static phys_addr_t mv64x60_bridge_pbase;
37static void *mv64x60_bridge_vbase; 37static void __iomem *mv64x60_bridge_vbase;
38static u32 mv64x60_bridge_type = MV64x60_TYPE_INVALID; 38static u32 mv64x60_bridge_type = MV64x60_TYPE_INVALID;
39static u32 mv64x60_bridge_rev; 39static u32 mv64x60_bridge_rev;
40#if defined(CONFIG_SYSFS) && !defined(CONFIG_GT64260) 40#if defined(CONFIG_SYSFS) && !defined(CONFIG_GT64260)
@@ -938,7 +938,7 @@ mv64x60_setup_for_chip(struct mv64x60_handle *bh)
938 * 938 *
939 * Return the virtual address of the bridge's registers. 939 * Return the virtual address of the bridge's registers.
940 */ 940 */
941void * 941void __iomem *
942mv64x60_get_bridge_vbase(void) 942mv64x60_get_bridge_vbase(void)
943{ 943{
944 return mv64x60_bridge_vbase; 944 return mv64x60_bridge_vbase;
diff --git a/arch/ppc/syslib/of_device.c b/arch/ppc/syslib/of_device.c
index da8a0f2128dc..93c7231ea709 100644
--- a/arch/ppc/syslib/of_device.c
+++ b/arch/ppc/syslib/of_device.c
@@ -234,7 +234,9 @@ void of_device_unregister(struct of_device *ofdev)
234 device_unregister(&ofdev->dev); 234 device_unregister(&ofdev->dev);
235} 235}
236 236
237struct of_device* of_platform_device_create(struct device_node *np, const char *bus_id) 237struct of_device* of_platform_device_create(struct device_node *np,
238 const char *bus_id,
239 struct device *parent)
238{ 240{
239 struct of_device *dev; 241 struct of_device *dev;
240 u32 *reg; 242 u32 *reg;
@@ -247,7 +249,7 @@ struct of_device* of_platform_device_create(struct device_node *np, const char *
247 dev->node = of_node_get(np); 249 dev->node = of_node_get(np);
248 dev->dma_mask = 0xffffffffUL; 250 dev->dma_mask = 0xffffffffUL;
249 dev->dev.dma_mask = &dev->dma_mask; 251 dev->dev.dma_mask = &dev->dma_mask;
250 dev->dev.parent = NULL; 252 dev->dev.parent = parent;
251 dev->dev.bus = &of_platform_bus_type; 253 dev->dev.bus = &of_platform_bus_type;
252 dev->dev.release = of_release_dev; 254 dev->dev.release = of_release_dev;
253 255
diff --git a/arch/ppc/syslib/open_pic.c b/arch/ppc/syslib/open_pic.c
index 53da58523e39..1cf5de21a3fd 100644
--- a/arch/ppc/syslib/open_pic.c
+++ b/arch/ppc/syslib/open_pic.c
@@ -13,7 +13,6 @@
13#include <linux/kernel.h> 13#include <linux/kernel.h>
14#include <linux/sched.h> 14#include <linux/sched.h>
15#include <linux/init.h> 15#include <linux/init.h>
16#include <linux/irq.h>
17#include <linux/interrupt.h> 16#include <linux/interrupt.h>
18#include <linux/sysdev.h> 17#include <linux/sysdev.h>
19#include <linux/errno.h> 18#include <linux/errno.h>
diff --git a/arch/ppc/syslib/open_pic2.c b/arch/ppc/syslib/open_pic2.c
index 9a7e8748e2b2..16cff91d9f41 100644
--- a/arch/ppc/syslib/open_pic2.c
+++ b/arch/ppc/syslib/open_pic2.c
@@ -17,7 +17,6 @@
17#include <linux/kernel.h> 17#include <linux/kernel.h>
18#include <linux/sched.h> 18#include <linux/sched.h>
19#include <linux/init.h> 19#include <linux/init.h>
20#include <linux/irq.h>
21#include <linux/interrupt.h> 20#include <linux/interrupt.h>
22#include <linux/sysdev.h> 21#include <linux/sysdev.h>
23#include <linux/errno.h> 22#include <linux/errno.h>
diff --git a/arch/ppc/syslib/ppc4xx_setup.c b/arch/ppc/syslib/ppc4xx_setup.c
index b843c4fef25e..bf83240689dc 100644
--- a/arch/ppc/syslib/ppc4xx_setup.c
+++ b/arch/ppc/syslib/ppc4xx_setup.c
@@ -18,7 +18,6 @@
18#include <linux/smp.h> 18#include <linux/smp.h>
19#include <linux/threads.h> 19#include <linux/threads.h>
20#include <linux/spinlock.h> 20#include <linux/spinlock.h>
21#include <linux/irq.h>
22#include <linux/reboot.h> 21#include <linux/reboot.h>
23#include <linux/param.h> 22#include <linux/param.h>
24#include <linux/string.h> 23#include <linux/string.h>
diff --git a/arch/ppc64/kernel/bpa_iommu.c b/arch/ppc64/kernel/bpa_iommu.c
index f33a7bccb0d7..507eb9d0223f 100644
--- a/arch/ppc64/kernel/bpa_iommu.c
+++ b/arch/ppc64/kernel/bpa_iommu.c
@@ -99,7 +99,11 @@ get_iost_entry(unsigned long iopt_base, unsigned long io_address, unsigned page_
99 break; 99 break;
100 100
101 default: /* not a known compile time constant */ 101 default: /* not a known compile time constant */
102 BUILD_BUG_ON(1); 102 {
103 /* BUILD_BUG_ON() is not usable here */
104 extern void __get_iost_entry_bad_page_size(void);
105 __get_iost_entry_bad_page_size();
106 }
103 break; 107 break;
104 } 108 }
105 109
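
The bpa_iommu change above swaps BUILD_BUG_ON(1), which is not usable in that spot, for a call to a function that is declared but never defined, so an unexpected page size becomes a link-time error. A stand-alone sketch of the idiom, assuming an optimizing build (e.g. gcc -O2) so the dead branch really is discarded; all names below are illustrative:

#include <stdio.h>

/* Never defined anywhere on purpose: if the compiler cannot prove the bad
 * branch dead, linking fails with an undefined reference instead of the
 * code silently running with a wrong page size. */
extern void bad_page_size_chosen(void);

static inline unsigned long iost_entries_for(unsigned long page_size)
{
    switch (page_size) {
    case 4096:
        return 1;
    case 65536:
        return 16;
    default:
        bad_page_size_chosen();     /* only reachable for bad constants */
        return 0;
    }
}

int main(void)
{
    /* the argument is a compile-time constant, so the default branch and
     * its call are optimized away and the program links cleanly */
    printf("%lu entries\n", iost_entries_for(65536));
    return 0;
}
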
diff --git a/arch/ppc64/kernel/kprobes.c b/arch/ppc64/kernel/kprobes.c
index 7e80d49c589a..9c6facc24f70 100644
--- a/arch/ppc64/kernel/kprobes.c
+++ b/arch/ppc64/kernel/kprobes.c
@@ -59,9 +59,9 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
59 59
60 /* insn must be on a special executable page on ppc64 */ 60 /* insn must be on a special executable page on ppc64 */
61 if (!ret) { 61 if (!ret) {
62 up(&kprobe_mutex);
63 p->ainsn.insn = get_insn_slot();
64 down(&kprobe_mutex); 62 down(&kprobe_mutex);
63 p->ainsn.insn = get_insn_slot();
64 up(&kprobe_mutex);
65 if (!p->ainsn.insn) 65 if (!p->ainsn.insn)
66 ret = -ENOMEM; 66 ret = -ENOMEM;
67 } 67 }
@@ -90,9 +90,9 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p)
90 90
91void __kprobes arch_remove_kprobe(struct kprobe *p) 91void __kprobes arch_remove_kprobe(struct kprobe *p)
92{ 92{
93 up(&kprobe_mutex);
94 free_insn_slot(p->ainsn.insn);
95 down(&kprobe_mutex); 93 down(&kprobe_mutex);
94 free_insn_slot(p->ainsn.insn);
95 up(&kprobe_mutex);
96} 96}
97 97
98static inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs) 98static inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
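
The kprobes change above corrects the lock ordering around the instruction-slot helpers: acquire kprobe_mutex with down() before calling them and release it with up() afterwards, instead of the reverse. A small userspace sketch of the same acquire/work/release ordering using a POSIX semaphore (names are illustrative; older glibc needs -pthread at link time):

#include <semaphore.h>
#include <stdio.h>

/* Shared state guarded by the semaphore; the point is the ordering:
 * acquire before touching it, release only after the work is done. */
static sem_t slot_sem;
static int free_slots = 8;

static int take_slot(void)
{
    int got;

    sem_wait(&slot_sem);            /* down(): acquire first */
    got = (free_slots > 0) ? free_slots-- : -1;
    sem_post(&slot_sem);            /* up(): release after the work */
    return got;
}

int main(void)
{
    sem_init(&slot_sem, 0, 1);      /* count of 1, used as a mutex */
    printf("got slot %d\n", take_slot());
    sem_destroy(&slot_sem);
    return 0;
}
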
diff --git a/arch/ppc64/kernel/machine_kexec.c b/arch/ppc64/kernel/machine_kexec.c
index 4775f12a013c..bf7cc4f8210f 100644
--- a/arch/ppc64/kernel/machine_kexec.c
+++ b/arch/ppc64/kernel/machine_kexec.c
@@ -205,6 +205,7 @@ static void kexec_prepare_cpus(void)
205 continue; 205 continue;
206 206
207 while (paca[i].hw_cpu_id != -1) { 207 while (paca[i].hw_cpu_id != -1) {
208 barrier();
208 if (!cpu_possible(i)) { 209 if (!cpu_possible(i)) {
209 printk("kexec: cpu %d hw_cpu_id %d is not" 210 printk("kexec: cpu %d hw_cpu_id %d is not"
210 " possible, ignoring\n", 211 " possible, ignoring\n",
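
The machine_kexec change above adds barrier() inside the loop that polls paca[i].hw_cpu_id, forcing the compiler to re-read the field on every pass instead of hoisting one stale read out of the loop. A self-contained sketch of a polling loop with a compiler barrier; the counter merely stands in for the store another CPU performs in the real code:

#include <stdio.h>

/* Compiler barrier in the shape the kernel usually uses: the empty asm with
 * a "memory" clobber stops the compiler from caching reads across it. */
#define barrier() __asm__ __volatile__("" ::: "memory")

static int hw_cpu_id = 5;
static int external_events = 3;

static void wait_for_cpu_gone(void)
{
    while (hw_cpu_id != -1) {
        barrier();                  /* re-read hw_cpu_id each iteration */
        if (--external_events == 0) /* stand-in for the other CPU's store */
            hw_cpu_id = -1;
    }
}

int main(void)
{
    wait_for_cpu_gone();
    printf("cpu released after polling\n");
    return 0;
}
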
diff --git a/arch/ppc64/kernel/of_device.c b/arch/ppc64/kernel/of_device.c
index da580812ddfe..9f200f0f2ad5 100644
--- a/arch/ppc64/kernel/of_device.c
+++ b/arch/ppc64/kernel/of_device.c
@@ -233,7 +233,9 @@ void of_device_unregister(struct of_device *ofdev)
233 device_unregister(&ofdev->dev); 233 device_unregister(&ofdev->dev);
234} 234}
235 235
236struct of_device* of_platform_device_create(struct device_node *np, const char *bus_id) 236struct of_device* of_platform_device_create(struct device_node *np,
237 const char *bus_id,
238 struct device *parent)
237{ 239{
238 struct of_device *dev; 240 struct of_device *dev;
239 241
@@ -245,7 +247,7 @@ struct of_device* of_platform_device_create(struct device_node *np, const char *
245 dev->node = np; 247 dev->node = np;
246 dev->dma_mask = 0xffffffffUL; 248 dev->dma_mask = 0xffffffffUL;
247 dev->dev.dma_mask = &dev->dma_mask; 249 dev->dev.dma_mask = &dev->dma_mask;
248 dev->dev.parent = NULL; 250 dev->dev.parent = parent;
249 dev->dev.bus = &of_platform_bus_type; 251 dev->dev.bus = &of_platform_bus_type;
250 dev->dev.release = of_release_dev; 252 dev->dev.release = of_release_dev;
251 253
@@ -259,6 +261,7 @@ struct of_device* of_platform_device_create(struct device_node *np, const char *
259 return dev; 261 return dev;
260} 262}
261 263
264
262EXPORT_SYMBOL(of_match_device); 265EXPORT_SYMBOL(of_match_device);
263EXPORT_SYMBOL(of_platform_bus_type); 266EXPORT_SYMBOL(of_platform_bus_type);
264EXPORT_SYMBOL(of_register_driver); 267EXPORT_SYMBOL(of_register_driver);
diff --git a/arch/ppc64/kernel/pSeries_iommu.c b/arch/ppc64/kernel/pSeries_iommu.c
index 8c6313e7e145..d17f0108a032 100644
--- a/arch/ppc64/kernel/pSeries_iommu.c
+++ b/arch/ppc64/kernel/pSeries_iommu.c
@@ -364,7 +364,8 @@ static void iommu_bus_setup_pSeries(struct pci_bus *bus)
364 364
365 while (pci->phb->dma_window_size * children > 0x80000000ul) 365 while (pci->phb->dma_window_size * children > 0x80000000ul)
366 pci->phb->dma_window_size >>= 1; 366 pci->phb->dma_window_size >>= 1;
367 DBG("No ISA/IDE, window size is %x\n", pci->phb->dma_window_size); 367 DBG("No ISA/IDE, window size is 0x%lx\n",
368 pci->phb->dma_window_size);
368 pci->phb->dma_window_base_cur = 0; 369 pci->phb->dma_window_base_cur = 0;
369 370
370 return; 371 return;
@@ -388,7 +389,7 @@ static void iommu_bus_setup_pSeries(struct pci_bus *bus)
388 while (pci->phb->dma_window_size * children > 0x70000000ul) 389 while (pci->phb->dma_window_size * children > 0x70000000ul)
389 pci->phb->dma_window_size >>= 1; 390 pci->phb->dma_window_size >>= 1;
390 391
391 DBG("ISA/IDE, window size is %x\n", pci->phb->dma_window_size); 392 DBG("ISA/IDE, window size is 0x%lx\n", pci->phb->dma_window_size);
392 393
393} 394}
394 395
@@ -442,7 +443,7 @@ static void iommu_dev_setup_pSeries(struct pci_dev *dev)
442 struct device_node *dn, *mydn; 443 struct device_node *dn, *mydn;
443 struct iommu_table *tbl; 444 struct iommu_table *tbl;
444 445
445 DBG("iommu_dev_setup_pSeries, dev %p (%s)\n", dev, dev->pretty_name); 446 DBG("iommu_dev_setup_pSeries, dev %p (%s)\n", dev, pci_name(dev));
446 447
447 mydn = dn = pci_device_to_OF_node(dev); 448 mydn = dn = pci_device_to_OF_node(dev);
448 449
@@ -469,7 +470,7 @@ static void iommu_dev_setup_pSeries(struct pci_dev *dev)
469 if (dn && dn->data) { 470 if (dn && dn->data) {
470 PCI_DN(mydn)->iommu_table = PCI_DN(dn)->iommu_table; 471 PCI_DN(mydn)->iommu_table = PCI_DN(dn)->iommu_table;
471 } else { 472 } else {
472 DBG("iommu_dev_setup_pSeries, dev %p (%s) has no iommu table\n", dev, dev->pretty_name); 473 DBG("iommu_dev_setup_pSeries, dev %p (%s) has no iommu table\n", dev, pci_name(dev));
473 } 474 }
474} 475}
475 476
@@ -503,7 +504,7 @@ static void iommu_dev_setup_pSeriesLP(struct pci_dev *dev)
503 int *dma_window = NULL; 504 int *dma_window = NULL;
504 struct pci_dn *pci; 505 struct pci_dn *pci;
505 506
506 DBG("iommu_dev_setup_pSeriesLP, dev %p (%s)\n", dev, dev->pretty_name); 507 DBG("iommu_dev_setup_pSeriesLP, dev %p (%s)\n", dev, pci_name(dev));
507 508
508 /* dev setup for LPAR is a little tricky, since the device tree might 509 /* dev setup for LPAR is a little tricky, since the device tree might
509 * contain the dma-window properties per-device and not neccesarily 510 * contain the dma-window properties per-device and not neccesarily
@@ -525,9 +526,8 @@ static void iommu_dev_setup_pSeriesLP(struct pci_dev *dev)
525 * slots on POWER4 machines. 526 * slots on POWER4 machines.
526 */ 527 */
527 if (dma_window == NULL || pdn->parent == NULL) { 528 if (dma_window == NULL || pdn->parent == NULL) {
528 /* Fall back to regular (non-LPAR) dev setup */ 529 DBG("No dma window for device, linking to parent\n");
529 DBG("No dma window for device, falling back to regular setup\n"); 530 PCI_DN(dn)->iommu_table = PCI_DN(pdn)->iommu_table;
530 iommu_dev_setup_pSeries(dev);
531 return; 531 return;
532 } else { 532 } else {
533 DBG("Found DMA window, allocating table\n"); 533 DBG("Found DMA window, allocating table\n");
diff --git a/arch/ppc64/kernel/pmac_setup.c b/arch/ppc64/kernel/pmac_setup.c
index 325426c7bed0..25755252067a 100644
--- a/arch/ppc64/kernel/pmac_setup.c
+++ b/arch/ppc64/kernel/pmac_setup.c
@@ -434,15 +434,23 @@ static int pmac_check_legacy_ioport(unsigned int baseport)
434 434
435static int __init pmac_declare_of_platform_devices(void) 435static int __init pmac_declare_of_platform_devices(void)
436{ 436{
437 struct device_node *np; 437 struct device_node *np, *npp;
438 438
439 np = find_devices("u3"); 439 npp = of_find_node_by_name(NULL, "u3");
440 if (np) { 440 if (npp) {
441 for (np = np->child; np != NULL; np = np->sibling) 441 for (np = NULL; (np = of_get_next_child(npp, np)) != NULL;) {
442 if (strncmp(np->name, "i2c", 3) == 0) { 442 if (strncmp(np->name, "i2c", 3) == 0) {
443 of_platform_device_create(np, "u3-i2c"); 443 of_platform_device_create(np, "u3-i2c", NULL);
444 of_node_put(np);
444 break; 445 break;
445 } 446 }
447 }
448 of_node_put(npp);
449 }
450 npp = of_find_node_by_type(NULL, "smu");
451 if (npp) {
452 of_platform_device_create(npp, "smu", NULL);
453 of_node_put(npp);
446 } 454 }
447 455
448 return 0; 456 return 0;
diff --git a/arch/ppc64/kernel/pmac_time.c b/arch/ppc64/kernel/pmac_time.c
index 3059edb09cc8..41bbb8c59697 100644
--- a/arch/ppc64/kernel/pmac_time.c
+++ b/arch/ppc64/kernel/pmac_time.c
@@ -84,7 +84,7 @@ void __pmac pmac_get_rtc_time(struct rtc_time *tm)
84 84
85#ifdef CONFIG_PMAC_SMU 85#ifdef CONFIG_PMAC_SMU
86 case SYS_CTRLER_SMU: 86 case SYS_CTRLER_SMU:
87 smu_get_rtc_time(tm); 87 smu_get_rtc_time(tm, 1);
88 break; 88 break;
89#endif /* CONFIG_PMAC_SMU */ 89#endif /* CONFIG_PMAC_SMU */
90 default: 90 default:
@@ -128,7 +128,7 @@ int __pmac pmac_set_rtc_time(struct rtc_time *tm)
128 128
129#ifdef CONFIG_PMAC_SMU 129#ifdef CONFIG_PMAC_SMU
130 case SYS_CTRLER_SMU: 130 case SYS_CTRLER_SMU:
131 return smu_set_rtc_time(tm); 131 return smu_set_rtc_time(tm, 1);
132#endif /* CONFIG_PMAC_SMU */ 132#endif /* CONFIG_PMAC_SMU */
133 default: 133 default:
134 return -ENODEV; 134 return -ENODEV;
diff --git a/arch/ppc64/kernel/ptrace.c b/arch/ppc64/kernel/ptrace.c
index 85ed3188a91d..b1c044ca5756 100644
--- a/arch/ppc64/kernel/ptrace.c
+++ b/arch/ppc64/kernel/ptrace.c
@@ -219,6 +219,7 @@ int sys_ptrace(long request, long pid, long addr, long data)
219 219
220 case PTRACE_SET_DEBUGREG: 220 case PTRACE_SET_DEBUGREG:
221 ret = ptrace_set_debugreg(child, addr, data); 221 ret = ptrace_set_debugreg(child, addr, data);
222 break;
222 223
223 case PTRACE_DETACH: 224 case PTRACE_DETACH:
224 ret = ptrace_detach(child, data); 225 ret = ptrace_detach(child, data);
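
The one-line addition above closes a classic switch fall-through: without the break, a PTRACE_SET_DEBUGREG request would continue into the PTRACE_DETACH case and detach the child as a side effect. A toy illustration of the failure mode, plain C and unrelated to the kernel sources:

#include <stdio.h>

enum { REQ_SET_DEBUGREG = 1, REQ_DETACH = 2 };

static const char *handle(int request)
{
	const char *action = "none";

	switch (request) {
	case REQ_SET_DEBUGREG:
		action = "set debug register";
		break;	/* without this break, execution falls into REQ_DETACH */
	case REQ_DETACH:
		action = "detach";
		break;
	}
	return action;
}

int main(void)
{
	/* With the break in place this prints "set debug register", not "detach". */
	printf("%s\n", handle(REQ_SET_DEBUGREG));
	return 0;
}
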
diff --git a/arch/ppc64/mm/hash_native.c b/arch/ppc64/mm/hash_native.c
index 7626bb59954d..bfd385b7713c 100644
--- a/arch/ppc64/mm/hash_native.c
+++ b/arch/ppc64/mm/hash_native.c
@@ -343,9 +343,7 @@ static void native_flush_hash_range(unsigned long context,
343 hpte_t *hptep; 343 hpte_t *hptep;
344 unsigned long hpte_v; 344 unsigned long hpte_v;
345 struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch); 345 struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
346 346 unsigned long large = batch->large;
347 /* XXX fix for large ptes */
348 unsigned long large = 0;
349 347
350 local_irq_save(flags); 348 local_irq_save(flags);
351 349
@@ -407,7 +405,7 @@ static void native_flush_hash_range(unsigned long context,
407 asm volatile("ptesync":::"memory"); 405 asm volatile("ptesync":::"memory");
408 406
409 for (i = 0; i < j; i++) 407 for (i = 0; i < j; i++)
410 __tlbie(batch->vaddr[i], 0); 408 __tlbie(batch->vaddr[i], large);
411 409
412 asm volatile("eieio; tlbsync; ptesync":::"memory"); 410 asm volatile("eieio; tlbsync; ptesync":::"memory");
413 411
diff --git a/arch/ppc64/mm/hugetlbpage.c b/arch/ppc64/mm/hugetlbpage.c
index 338771ec70d7..0ea0994ed974 100644
--- a/arch/ppc64/mm/hugetlbpage.c
+++ b/arch/ppc64/mm/hugetlbpage.c
@@ -710,10 +710,13 @@ repeat:
710 hpte_group = ((~hash & htab_hash_mask) * 710 hpte_group = ((~hash & htab_hash_mask) *
711 HPTES_PER_GROUP) & ~0x7UL; 711 HPTES_PER_GROUP) & ~0x7UL;
712 slot = ppc_md.hpte_insert(hpte_group, va, prpn, 712 slot = ppc_md.hpte_insert(hpte_group, va, prpn,
713 HPTE_V_LARGE, rflags); 713 HPTE_V_LARGE |
714 HPTE_V_SECONDARY,
715 rflags);
714 if (slot == -1) { 716 if (slot == -1) {
715 if (mftb() & 0x1) 717 if (mftb() & 0x1)
716 hpte_group = ((hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL; 718 hpte_group = ((hash & htab_hash_mask) *
719 HPTES_PER_GROUP)&~0x7UL;
717 720
718 ppc_md.hpte_remove(hpte_group); 721 ppc_md.hpte_remove(hpte_group);
719 goto repeat; 722 goto repeat;
diff --git a/arch/ppc64/mm/tlb.c b/arch/ppc64/mm/tlb.c
index d8a6593a13f0..21fbffb23a43 100644
--- a/arch/ppc64/mm/tlb.c
+++ b/arch/ppc64/mm/tlb.c
@@ -143,7 +143,8 @@ void hpte_update(struct mm_struct *mm, unsigned long addr,
143 * up scanning and resetting referenced bits then our batch context 143 * up scanning and resetting referenced bits then our batch context
144 * will change mid stream. 144 * will change mid stream.
145 */ 145 */
146 if (unlikely(i != 0 && context != batch->context)) { 146 if (i != 0 && (context != batch->context ||
147 batch->large != pte_huge(pte))) {
147 flush_tlb_pending(); 148 flush_tlb_pending();
148 i = 0; 149 i = 0;
149 } 150 }
@@ -151,6 +152,7 @@ void hpte_update(struct mm_struct *mm, unsigned long addr,
151 if (i == 0) { 152 if (i == 0) {
152 batch->context = context; 153 batch->context = context;
153 batch->mm = mm; 154 batch->mm = mm;
155 batch->large = pte_huge(pte);
154 } 156 }
155 batch->pte[i] = __pte(pte); 157 batch->pte[i] = __pte(pte);
156 batch->addr[i] = addr; 158 batch->addr[i] = addr;
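
Taken together, the hash_native.c and tlb.c hunks let a flush batch carry huge-page entries: the batch records whether it holds large pages, refuses to mix page sizes (or contexts), and hands that flag to the TLB invalidate. A self-contained userspace model of that batching rule, with invented names rather than the kernel's ppc64_tlb_batch API:

#include <stdio.h>

#define BATCH_MAX 16

struct flush_batch {
	unsigned long context;
	int large;			/* does this batch hold huge-page entries? */
	unsigned long addr[BATCH_MAX];
	int n;
};

static void flush_pending(struct flush_batch *b)
{
	for (int i = 0; i < b->n; i++)
		printf("flush %#lx (ctx %lu, large=%d)\n",
		       b->addr[i], b->context, b->large);
	b->n = 0;
}

static void batch_add(struct flush_batch *b, unsigned long ctx,
		      unsigned long addr, int large)
{
	/* A change of context or of page size forces a flush first. */
	if (b->n != 0 && (ctx != b->context || large != b->large))
		flush_pending(b);
	if (b->n == 0) {
		b->context = ctx;
		b->large = large;
	}
	b->addr[b->n++] = addr;
	if (b->n == BATCH_MAX)
		flush_pending(b);
}

int main(void)
{
	struct flush_batch b = { 0 };

	batch_add(&b, 1, 0x1000, 0);
	batch_add(&b, 1, 0x2000, 0);
	batch_add(&b, 1, 0x400000, 1);	/* page size changed: flushes the batch */
	flush_pending(&b);
	return 0;
}
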
diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c
index 7358cdb8441f..4ff6808456ea 100644
--- a/arch/s390/kernel/compat_signal.c
+++ b/arch/s390/kernel/compat_signal.c
@@ -143,7 +143,7 @@ int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
143 break; 143 break;
144 case __SI_FAULT >> 16: 144 case __SI_FAULT >> 16:
145 err |= __get_user(tmp, &from->si_addr); 145 err |= __get_user(tmp, &from->si_addr);
146 to->si_addr = (void *)(u64) (tmp & PSW32_ADDR_INSN); 146 to->si_addr = (void __user *)(u64) (tmp & PSW32_ADDR_INSN);
147 break; 147 break;
148 case __SI_POLL >> 16: 148 case __SI_POLL >> 16:
149 err |= __get_user(to->si_band, &from->si_band); 149 err |= __get_user(to->si_band, &from->si_band);
@@ -338,7 +338,7 @@ sys32_sigaltstack(const stack_t32 __user *uss, stack_t32 __user *uoss,
338 err |= __get_user(kss.ss_flags, &uss->ss_flags); 338 err |= __get_user(kss.ss_flags, &uss->ss_flags);
339 if (err) 339 if (err)
340 return -EFAULT; 340 return -EFAULT;
341 kss.ss_sp = (void *) ss_sp; 341 kss.ss_sp = (void __user *) ss_sp;
342 } 342 }
343 343
344 set_fs (KERNEL_DS); 344 set_fs (KERNEL_DS);
@@ -461,7 +461,7 @@ asmlinkage long sys32_rt_sigreturn(struct pt_regs *regs)
461 goto badframe; 461 goto badframe;
462 462
463 err = __get_user(ss_sp, &frame->uc.uc_stack.ss_sp); 463 err = __get_user(ss_sp, &frame->uc.uc_stack.ss_sp);
464 st.ss_sp = (void *) A((unsigned long)ss_sp); 464 st.ss_sp = compat_ptr(ss_sp);
465 err |= __get_user(st.ss_size, &frame->uc.uc_stack.ss_size); 465 err |= __get_user(st.ss_size, &frame->uc.uc_stack.ss_size);
466 err |= __get_user(st.ss_flags, &frame->uc.uc_stack.ss_flags); 466 err |= __get_user(st.ss_flags, &frame->uc.uc_stack.ss_flags);
467 if (err) 467 if (err)
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c
index 6a3f5b7473a9..6e0110d71191 100644
--- a/arch/s390/kernel/signal.c
+++ b/arch/s390/kernel/signal.c
@@ -376,8 +376,8 @@ static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
376 376
377 /* Create the ucontext. */ 377 /* Create the ucontext. */
378 err |= __put_user(0, &frame->uc.uc_flags); 378 err |= __put_user(0, &frame->uc.uc_flags);
379 err |= __put_user(0, &frame->uc.uc_link); 379 err |= __put_user(NULL, &frame->uc.uc_link);
380 err |= __put_user((void *)current->sas_ss_sp, &frame->uc.uc_stack.ss_sp); 380 err |= __put_user((void __user *)current->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
381 err |= __put_user(sas_ss_flags(regs->gprs[15]), 381 err |= __put_user(sas_ss_flags(regs->gprs[15]),
382 &frame->uc.uc_stack.ss_flags); 382 &frame->uc.uc_stack.ss_flags);
383 err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size); 383 err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
diff --git a/arch/sparc/kernel/setup.c b/arch/sparc/kernel/setup.c
index 53c192a4982f..3509e4305532 100644
--- a/arch/sparc/kernel/setup.c
+++ b/arch/sparc/kernel/setup.c
@@ -249,8 +249,6 @@ struct tt_entry *sparc_ttable;
249 249
250struct pt_regs fake_swapper_regs; 250struct pt_regs fake_swapper_regs;
251 251
252extern void paging_init(void);
253
254void __init setup_arch(char **cmdline_p) 252void __init setup_arch(char **cmdline_p)
255{ 253{
256 int i; 254 int i;
diff --git a/arch/sparc64/Kconfig.debug b/arch/sparc64/Kconfig.debug
index af0e9411b83e..fa06ea04837b 100644
--- a/arch/sparc64/Kconfig.debug
+++ b/arch/sparc64/Kconfig.debug
@@ -33,6 +33,14 @@ config DEBUG_BOOTMEM
33 depends on DEBUG_KERNEL 33 depends on DEBUG_KERNEL
34 bool "Debug BOOTMEM initialization" 34 bool "Debug BOOTMEM initialization"
35 35
36config DEBUG_PAGEALLOC
37 bool "Page alloc debugging"
38 depends on DEBUG_KERNEL && !SOFTWARE_SUSPEND
39 help
40 Unmap pages from the kernel linear mapping after free_pages().
41 This results in a large slowdown, but helps to find certain types
42 of memory corruptions.
43
36config MCOUNT 44config MCOUNT
37 bool 45 bool
38 depends on STACK_DEBUG 46 depends on STACK_DEBUG
diff --git a/arch/sparc64/kernel/cpu.c b/arch/sparc64/kernel/cpu.c
index 48756958116b..77ef5df4e5a7 100644
--- a/arch/sparc64/kernel/cpu.c
+++ b/arch/sparc64/kernel/cpu.c
@@ -39,6 +39,8 @@ struct cpu_fp_info linux_sparc_fpu[] = {
39 { 0x3e, 0x15, 0, "UltraSparc III+ integrated FPU"}, 39 { 0x3e, 0x15, 0, "UltraSparc III+ integrated FPU"},
40 { 0x3e, 0x16, 0, "UltraSparc IIIi integrated FPU"}, 40 { 0x3e, 0x16, 0, "UltraSparc IIIi integrated FPU"},
41 { 0x3e, 0x18, 0, "UltraSparc IV integrated FPU"}, 41 { 0x3e, 0x18, 0, "UltraSparc IV integrated FPU"},
42 { 0x3e, 0x19, 0, "UltraSparc IV+ integrated FPU"},
43 { 0x3e, 0x22, 0, "UltraSparc IIIi+ integrated FPU"},
42}; 44};
43 45
44#define NSPARCFPU (sizeof(linux_sparc_fpu)/sizeof(struct cpu_fp_info)) 46#define NSPARCFPU (sizeof(linux_sparc_fpu)/sizeof(struct cpu_fp_info))
@@ -53,6 +55,8 @@ struct cpu_iu_info linux_sparc_chips[] = {
53 { 0x3e, 0x15, "TI UltraSparc III+ (Cheetah+)"}, 55 { 0x3e, 0x15, "TI UltraSparc III+ (Cheetah+)"},
54 { 0x3e, 0x16, "TI UltraSparc IIIi (Jalapeno)"}, 56 { 0x3e, 0x16, "TI UltraSparc IIIi (Jalapeno)"},
55 { 0x3e, 0x18, "TI UltraSparc IV (Jaguar)"}, 57 { 0x3e, 0x18, "TI UltraSparc IV (Jaguar)"},
58 { 0x3e, 0x19, "TI UltraSparc IV+ (Panther)"},
59 { 0x3e, 0x22, "TI UltraSparc IIIi+ (Serrano)"},
56}; 60};
57 61
58#define NSPARCCHIPS (sizeof(linux_sparc_chips)/sizeof(struct cpu_iu_info)) 62#define NSPARCCHIPS (sizeof(linux_sparc_chips)/sizeof(struct cpu_iu_info))
diff --git a/arch/sparc64/kernel/devices.c b/arch/sparc64/kernel/devices.c
index d710274e516b..df9a1ca8fd77 100644
--- a/arch/sparc64/kernel/devices.c
+++ b/arch/sparc64/kernel/devices.c
@@ -135,6 +135,28 @@ void __init device_scan(void)
135 cpu_data(0).clock_tick = prom_getintdefault(cpu_node, 135 cpu_data(0).clock_tick = prom_getintdefault(cpu_node,
136 "clock-frequency", 136 "clock-frequency",
137 0); 137 0);
138 cpu_data(0).dcache_size = prom_getintdefault(cpu_node,
139 "dcache-size",
140 16 * 1024);
141 cpu_data(0).dcache_line_size =
142 prom_getintdefault(cpu_node, "dcache-line-size", 32);
143 cpu_data(0).icache_size = prom_getintdefault(cpu_node,
144 "icache-size",
145 16 * 1024);
146 cpu_data(0).icache_line_size =
147 prom_getintdefault(cpu_node, "icache-line-size", 32);
148 cpu_data(0).ecache_size = prom_getintdefault(cpu_node,
149 "ecache-size",
150 4 * 1024 * 1024);
151 cpu_data(0).ecache_line_size =
152 prom_getintdefault(cpu_node, "ecache-line-size", 64);
153 printk("CPU[0]: Caches "
154 "D[sz(%d):line_sz(%d)] "
155 "I[sz(%d):line_sz(%d)] "
156 "E[sz(%d):line_sz(%d)]\n",
157 cpu_data(0).dcache_size, cpu_data(0).dcache_line_size,
158 cpu_data(0).icache_size, cpu_data(0).icache_line_size,
159 cpu_data(0).ecache_size, cpu_data(0).ecache_line_size);
138 } 160 }
139#endif 161#endif
140 162
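
The added probing follows the usual firmware-property pattern: read each cache geometry value from the CPU node and fall back to a conservative default (16K caches with 32-byte lines, a 4MB external cache with 64-byte lines) when the property is absent. A toy, table-backed version of that lookup-with-default behaviour; prom_getintdefault() itself talks to the PROM, so this is only an illustration:

#include <stdio.h>
#include <string.h>

struct prop { const char *name; int value; };

static int getint_default(const struct prop *props, int nprops,
			  const char *name, int def)
{
	for (int i = 0; i < nprops; i++)
		if (strcmp(props[i].name, name) == 0)
			return props[i].value;
	return def;	/* property absent: use the caller's fallback */
}

int main(void)
{
	struct prop node[] = { { "dcache-size", 32768 } };

	printf("dcache-size: %d\n",
	       getint_default(node, 1, "dcache-size", 16 * 1024));
	printf("icache-size: %d\n",
	       getint_default(node, 1, "icache-size", 16 * 1024));
	return 0;
}
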
diff --git a/arch/sparc64/kernel/dtlb_backend.S b/arch/sparc64/kernel/dtlb_backend.S
index 538522848ad4..acc889a7f9c1 100644
--- a/arch/sparc64/kernel/dtlb_backend.S
+++ b/arch/sparc64/kernel/dtlb_backend.S
@@ -9,17 +9,7 @@
9#include <asm/pgtable.h> 9#include <asm/pgtable.h>
10#include <asm/mmu.h> 10#include <asm/mmu.h>
11 11
12#if PAGE_SHIFT == 13 12#define VALID_SZ_BITS (_PAGE_VALID | _PAGE_SZBITS)
13#define SZ_BITS _PAGE_SZ8K
14#elif PAGE_SHIFT == 16
15#define SZ_BITS _PAGE_SZ64K
16#elif PAGE_SHIFT == 19
17#define SZ_BITS _PAGE_SZ512K
18#elif PAGE_SHIFT == 22
19#define SZ_BITS _PAGE_SZ4MB
20#endif
21
22#define VALID_SZ_BITS (_PAGE_VALID | SZ_BITS)
23 13
24#define VPTE_BITS (_PAGE_CP | _PAGE_CV | _PAGE_P ) 14#define VPTE_BITS (_PAGE_CP | _PAGE_CV | _PAGE_P )
25#define VPTE_SHIFT (PAGE_SHIFT - 3) 15#define VPTE_SHIFT (PAGE_SHIFT - 3)
@@ -163,7 +153,6 @@ sparc64_vpte_continue:
163 stxa %g4, [%g1 + %g1] ASI_DMMU ! Restore previous TAG_ACCESS 153 stxa %g4, [%g1 + %g1] ASI_DMMU ! Restore previous TAG_ACCESS
164 retry ! Load PTE once again 154 retry ! Load PTE once again
165 155
166#undef SZ_BITS
167#undef VALID_SZ_BITS 156#undef VALID_SZ_BITS
168#undef VPTE_SHIFT 157#undef VPTE_SHIFT
169#undef VPTE_BITS 158#undef VPTE_BITS
diff --git a/arch/sparc64/kernel/dtlb_base.S b/arch/sparc64/kernel/dtlb_base.S
index ded2fed23fcc..702d349c1e88 100644
--- a/arch/sparc64/kernel/dtlb_base.S
+++ b/arch/sparc64/kernel/dtlb_base.S
@@ -71,7 +71,7 @@
71from_tl1_trap: 71from_tl1_trap:
72 rdpr %tl, %g5 ! For TL==3 test 72 rdpr %tl, %g5 ! For TL==3 test
73 CREATE_VPTE_OFFSET1(%g4, %g6) ! Create VPTE offset 73 CREATE_VPTE_OFFSET1(%g4, %g6) ! Create VPTE offset
74 be,pn %xcc, 3f ! Yep, special processing 74 be,pn %xcc, kvmap ! Yep, special processing
75 CREATE_VPTE_OFFSET2(%g4, %g6) ! Create VPTE offset 75 CREATE_VPTE_OFFSET2(%g4, %g6) ! Create VPTE offset
76 cmp %g5, 4 ! Last trap level? 76 cmp %g5, 4 ! Last trap level?
77 be,pn %xcc, longpath ! Yep, cannot risk VPTE miss 77 be,pn %xcc, longpath ! Yep, cannot risk VPTE miss
@@ -83,9 +83,9 @@ from_tl1_trap:
83 nop ! Delay-slot 83 nop ! Delay-slot
849: stxa %g5, [%g0] ASI_DTLB_DATA_IN ! Reload TLB 849: stxa %g5, [%g0] ASI_DTLB_DATA_IN ! Reload TLB
85 retry ! Trap return 85 retry ! Trap return
863: brlz,pt %g4, 9b ! Kernel virtual map? 86 nop
87 xor %g2, %g4, %g5 ! Finish bit twiddles 87 nop
88 ba,a,pt %xcc, kvmap ! Yep, go check for obp/vmalloc 88 nop
89 89
90/* DTLB ** ICACHE line 3: winfixups+real_faults */ 90/* DTLB ** ICACHE line 3: winfixups+real_faults */
91longpath: 91longpath:
diff --git a/arch/sparc64/kernel/entry.S b/arch/sparc64/kernel/entry.S
index b48349527853..2879b1072921 100644
--- a/arch/sparc64/kernel/entry.S
+++ b/arch/sparc64/kernel/entry.S
@@ -30,159 +30,6 @@
30 .text 30 .text
31 .align 32 31 .align 32
32 32
33 .globl sparc64_vpte_patchme1
34 .globl sparc64_vpte_patchme2
35/*
36 * On a second level vpte miss, check whether the original fault is to the OBP
37 * range (note that this is only possible for instruction miss, data misses to
38 * obp range do not use vpte). If so, go back directly to the faulting address.
39 * This is because we want to read the tpc, otherwise we have no way of knowing
40 * the 8k aligned faulting address if we are using >8k kernel pagesize. This
41 * also ensures no vpte range addresses are dropped into tlb while obp is
42 * executing (see inherit_locked_prom_mappings() rant).
43 */
44sparc64_vpte_nucleus:
45 /* Note that kvmap below has verified that the address is
46 * in the range MODULES_VADDR --> VMALLOC_END already. So
47 * here we need only check if it is an OBP address or not.
48 */
49 sethi %hi(LOW_OBP_ADDRESS), %g5
50 cmp %g4, %g5
51 blu,pn %xcc, sparc64_vpte_patchme1
52 mov 0x1, %g5
53 sllx %g5, 32, %g5
54 cmp %g4, %g5
55 blu,pn %xcc, obp_iaddr_patch
56 nop
57
 58	/* These two instructions are patched by paging_init(). */
59sparc64_vpte_patchme1:
60 sethi %hi(0), %g5
61sparc64_vpte_patchme2:
62 or %g5, %lo(0), %g5
63
64 /* With kernel PGD in %g5, branch back into dtlb_backend. */
65 ba,pt %xcc, sparc64_kpte_continue
66 andn %g1, 0x3, %g1 /* Finish PMD offset adjustment. */
67
68vpte_noent:
69 /* Restore previous TAG_ACCESS, %g5 is zero, and we will
70 * skip over the trap instruction so that the top level
 71	 * TLB miss handler will think this %g5 value is just an
72 * invalid PTE, thus branching to full fault processing.
73 */
74 mov TLB_SFSR, %g1
75 stxa %g4, [%g1 + %g1] ASI_DMMU
76 done
77
78 .globl obp_iaddr_patch
79obp_iaddr_patch:
80 /* These two instructions patched by inherit_prom_mappings(). */
81 sethi %hi(0), %g5
82 or %g5, %lo(0), %g5
83
84 /* Behave as if we are at TL0. */
85 wrpr %g0, 1, %tl
86 rdpr %tpc, %g4 /* Find original faulting iaddr */
87 srlx %g4, 13, %g4 /* Throw out context bits */
88 sllx %g4, 13, %g4 /* g4 has vpn + ctx0 now */
89
90 /* Restore previous TAG_ACCESS. */
91 mov TLB_SFSR, %g1
92 stxa %g4, [%g1 + %g1] ASI_IMMU
93
94 /* Get PMD offset. */
95 srlx %g4, 23, %g6
96 and %g6, 0x7ff, %g6
97 sllx %g6, 2, %g6
98
99 /* Load PMD, is it valid? */
100 lduwa [%g5 + %g6] ASI_PHYS_USE_EC, %g5
101 brz,pn %g5, longpath
102 sllx %g5, 11, %g5
103
104 /* Get PTE offset. */
105 srlx %g4, 13, %g6
106 and %g6, 0x3ff, %g6
107 sllx %g6, 3, %g6
108
109 /* Load PTE. */
110 ldxa [%g5 + %g6] ASI_PHYS_USE_EC, %g5
111 brgez,pn %g5, longpath
112 nop
113
114 /* TLB load and return from trap. */
115 stxa %g5, [%g0] ASI_ITLB_DATA_IN
116 retry
117
118 .globl obp_daddr_patch
119obp_daddr_patch:
120 /* These two instructions patched by inherit_prom_mappings(). */
121 sethi %hi(0), %g5
122 or %g5, %lo(0), %g5
123
124 /* Get PMD offset. */
125 srlx %g4, 23, %g6
126 and %g6, 0x7ff, %g6
127 sllx %g6, 2, %g6
128
129 /* Load PMD, is it valid? */
130 lduwa [%g5 + %g6] ASI_PHYS_USE_EC, %g5
131 brz,pn %g5, longpath
132 sllx %g5, 11, %g5
133
134 /* Get PTE offset. */
135 srlx %g4, 13, %g6
136 and %g6, 0x3ff, %g6
137 sllx %g6, 3, %g6
138
139 /* Load PTE. */
140 ldxa [%g5 + %g6] ASI_PHYS_USE_EC, %g5
141 brgez,pn %g5, longpath
142 nop
143
144 /* TLB load and return from trap. */
145 stxa %g5, [%g0] ASI_DTLB_DATA_IN
146 retry
147
148/*
149 * On a first level data miss, check whether this is to the OBP range (note
150 * that such accesses can be made by prom, as well as by kernel using
151 * prom_getproperty on "address"), and if so, do not use vpte access ...
152 * rather, use information saved during inherit_prom_mappings() using 8k
153 * pagesize.
154 */
155 .align 32
156kvmap:
157 sethi %hi(MODULES_VADDR), %g5
158 cmp %g4, %g5
159 blu,pn %xcc, longpath
160 mov (VMALLOC_END >> 24), %g5
161 sllx %g5, 24, %g5
162 cmp %g4, %g5
163 bgeu,pn %xcc, longpath
164 nop
165
166kvmap_check_obp:
167 sethi %hi(LOW_OBP_ADDRESS), %g5
168 cmp %g4, %g5
169 blu,pn %xcc, kvmap_vmalloc_addr
170 mov 0x1, %g5
171 sllx %g5, 32, %g5
172 cmp %g4, %g5
173 blu,pn %xcc, obp_daddr_patch
174 nop
175
176kvmap_vmalloc_addr:
177 /* If we get here, a vmalloc addr was accessed, load kernel VPTE. */
178 ldxa [%g3 + %g6] ASI_N, %g5
179 brgez,pn %g5, longpath
180 nop
181
182 /* PTE is valid, load into TLB and return from trap. */
183 stxa %g5, [%g0] ASI_DTLB_DATA_IN ! Reload TLB
184 retry
185
186 /* This is trivial with the new code... */ 33 /* This is trivial with the new code... */
187 .globl do_fpdis 34 .globl do_fpdis
188do_fpdis: 35do_fpdis:
@@ -525,14 +372,13 @@ cheetah_plus_patch_fpdis:
525 * 372 *
526 * DATA 0: [low 32-bits] Address of function to call, jmp to this 373 * DATA 0: [low 32-bits] Address of function to call, jmp to this
527 * [high 32-bits] MMU Context Argument 0, place in %g5 374 * [high 32-bits] MMU Context Argument 0, place in %g5
528 * DATA 1: Address Argument 1, place in %g6 375 * DATA 1: Address Argument 1, place in %g1
529 * DATA 2: Address Argument 2, place in %g7 376 * DATA 2: Address Argument 2, place in %g7
530 * 377 *
531 * With this method we can do most of the cross-call tlb/cache 378 * With this method we can do most of the cross-call tlb/cache
532 * flushing very quickly. 379 * flushing very quickly.
533 * 380 *
534 * Current CPU's IRQ worklist table is locked into %g1, 381 * Current CPU's IRQ worklist table is locked into %g6, don't touch.
535 * don't touch.
536 */ 382 */
537 .text 383 .text
538 .align 32 384 .align 32
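
The comment block above only renumbers the scratch registers (the address argument now arrives in %g1 and the IRQ worklist pointer lives in %g6); the packing of DATA 0 is unchanged: handler address in the low 32 bits, MMU context argument in the high 32 bits. A small standalone illustration of that packing, pure bit manipulation with no kernel API:

#include <stdio.h>
#include <stdint.h>

/* DATA 0: low 32 bits = address of function to call,
 *         high 32 bits = MMU context argument 0. */
static uint64_t pack_data0(uint32_t func_addr, uint32_t ctx_arg)
{
	return ((uint64_t)ctx_arg << 32) | func_addr;
}

int main(void)
{
	uint64_t d0 = pack_data0(0x00404abcu, 7);

	printf("func=%#x ctx=%u\n",
	       (unsigned)(d0 & 0xffffffffu), (unsigned)(d0 >> 32));
	return 0;
}
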
@@ -1006,13 +852,14 @@ cheetah_plus_dcpe_trap_vector:
1006 nop 852 nop
1007 853
1008do_cheetah_plus_data_parity: 854do_cheetah_plus_data_parity:
1009 ba,pt %xcc, etrap 855 rdpr %pil, %g2
856 wrpr %g0, 15, %pil
857 ba,pt %xcc, etrap_irq
1010 rd %pc, %g7 858 rd %pc, %g7
1011 mov 0x0, %o0 859 mov 0x0, %o0
1012 call cheetah_plus_parity_error 860 call cheetah_plus_parity_error
1013 add %sp, PTREGS_OFF, %o1 861 add %sp, PTREGS_OFF, %o1
1014 ba,pt %xcc, rtrap 862 ba,a,pt %xcc, rtrap_irq
1015 clr %l6
1016 863
1017cheetah_plus_dcpe_trap_vector_tl1: 864cheetah_plus_dcpe_trap_vector_tl1:
1018 membar #Sync 865 membar #Sync
@@ -1036,13 +883,14 @@ cheetah_plus_icpe_trap_vector:
1036 nop 883 nop
1037 884
1038do_cheetah_plus_insn_parity: 885do_cheetah_plus_insn_parity:
1039 ba,pt %xcc, etrap 886 rdpr %pil, %g2
887 wrpr %g0, 15, %pil
888 ba,pt %xcc, etrap_irq
1040 rd %pc, %g7 889 rd %pc, %g7
1041 mov 0x1, %o0 890 mov 0x1, %o0
1042 call cheetah_plus_parity_error 891 call cheetah_plus_parity_error
1043 add %sp, PTREGS_OFF, %o1 892 add %sp, PTREGS_OFF, %o1
1044 ba,pt %xcc, rtrap 893 ba,a,pt %xcc, rtrap_irq
1045 clr %l6
1046 894
1047cheetah_plus_icpe_trap_vector_tl1: 895cheetah_plus_icpe_trap_vector_tl1:
1048 membar #Sync 896 membar #Sync
@@ -1075,6 +923,10 @@ do_dcpe_tl1:
1075 nop 923 nop
1076 wrpr %g1, %tl ! Restore original trap level 924 wrpr %g1, %tl ! Restore original trap level
1077do_dcpe_tl1_nonfatal: /* Ok we may use interrupt globals safely. */ 925do_dcpe_tl1_nonfatal: /* Ok we may use interrupt globals safely. */
926 sethi %hi(dcache_parity_tl1_occurred), %g2
927 lduw [%g2 + %lo(dcache_parity_tl1_occurred)], %g1
928 add %g1, 1, %g1
929 stw %g1, [%g2 + %lo(dcache_parity_tl1_occurred)]
1078 /* Reset D-cache parity */ 930 /* Reset D-cache parity */
1079 sethi %hi(1 << 16), %g1 ! D-cache size 931 sethi %hi(1 << 16), %g1 ! D-cache size
1080 mov (1 << 5), %g2 ! D-cache line size 932 mov (1 << 5), %g2 ! D-cache line size
@@ -1121,6 +973,10 @@ do_icpe_tl1:
1121 nop 973 nop
1122 wrpr %g1, %tl ! Restore original trap level 974 wrpr %g1, %tl ! Restore original trap level
1123do_icpe_tl1_nonfatal: /* Ok we may use interrupt globals safely. */ 975do_icpe_tl1_nonfatal: /* Ok we may use interrupt globals safely. */
976 sethi %hi(icache_parity_tl1_occurred), %g2
977 lduw [%g2 + %lo(icache_parity_tl1_occurred)], %g1
978 add %g1, 1, %g1
979 stw %g1, [%g2 + %lo(icache_parity_tl1_occurred)]
1124 /* Flush I-cache */ 980 /* Flush I-cache */
1125 sethi %hi(1 << 15), %g1 ! I-cache size 981 sethi %hi(1 << 15), %g1 ! I-cache size
1126 mov (1 << 5), %g2 ! I-cache line size 982 mov (1 << 5), %g2 ! I-cache line size
diff --git a/arch/sparc64/kernel/head.S b/arch/sparc64/kernel/head.S
index 1fa06c4e3bdb..89406f9649a9 100644
--- a/arch/sparc64/kernel/head.S
+++ b/arch/sparc64/kernel/head.S
@@ -80,15 +80,165 @@ sparc_ramdisk_image64:
80 .xword 0 80 .xword 0
81 .word _end 81 .word _end
82 82
83 /* We must be careful, 32-bit OpenBOOT will get confused if it 83 /* PROM cif handler code address is in %o4. */
84 * tries to save away a register window to a 64-bit kernel 84sparc64_boot:
85 * stack address. Flush all windows, disable interrupts, 851: rd %pc, %g7
86 * remap if necessary, jump onto kernel trap table, then kernel 86 set 1b, %g1
87 * stack, or else we die. 87 cmp %g1, %g7
88 be,pn %xcc, sparc64_boot_after_remap
89 mov %o4, %l7
90
 91	/* We need to remap the kernel. Use position independent
92 * code to remap us to KERNBASE.
88 * 93 *
89 * PROM entry point is on %o4 94 * SILO can invoke us with 32-bit address masking enabled,
95 * so make sure that's clear.
90 */ 96 */
91sparc64_boot: 97 rdpr %pstate, %g1
98 andn %g1, PSTATE_AM, %g1
99 wrpr %g1, 0x0, %pstate
100 ba,a,pt %xcc, 1f
101
102 .globl prom_finddev_name, prom_chosen_path
103 .globl prom_getprop_name, prom_mmu_name
104 .globl prom_callmethod_name, prom_translate_name
105 .globl prom_map_name, prom_unmap_name, prom_mmu_ihandle_cache
106 .globl prom_boot_mapped_pc, prom_boot_mapping_mode
107 .globl prom_boot_mapping_phys_high, prom_boot_mapping_phys_low
108prom_finddev_name:
109 .asciz "finddevice"
110prom_chosen_path:
111 .asciz "/chosen"
112prom_getprop_name:
113 .asciz "getprop"
114prom_mmu_name:
115 .asciz "mmu"
116prom_callmethod_name:
117 .asciz "call-method"
118prom_translate_name:
119 .asciz "translate"
120prom_map_name:
121 .asciz "map"
122prom_unmap_name:
123 .asciz "unmap"
124 .align 4
125prom_mmu_ihandle_cache:
126 .word 0
127prom_boot_mapped_pc:
128 .word 0
129prom_boot_mapping_mode:
130 .word 0
131 .align 8
132prom_boot_mapping_phys_high:
133 .xword 0
134prom_boot_mapping_phys_low:
135 .xword 0
1361:
137 rd %pc, %l0
138 mov (1b - prom_finddev_name), %l1
139 mov (1b - prom_chosen_path), %l2
140 mov (1b - prom_boot_mapped_pc), %l3
141 sub %l0, %l1, %l1
142 sub %l0, %l2, %l2
143 sub %l0, %l3, %l3
144 stw %l0, [%l3]
145 sub %sp, (192 + 128), %sp
146
147 /* chosen_node = prom_finddevice("/chosen") */
148 stx %l1, [%sp + 2047 + 128 + 0x00] ! service, "finddevice"
149 mov 1, %l3
150 stx %l3, [%sp + 2047 + 128 + 0x08] ! num_args, 1
151 stx %l3, [%sp + 2047 + 128 + 0x10] ! num_rets, 1
152 stx %l2, [%sp + 2047 + 128 + 0x18] ! arg1, "/chosen"
153 stx %g0, [%sp + 2047 + 128 + 0x20] ! ret1
154 call %l7
155 add %sp, (2047 + 128), %o0 ! argument array
156
157 ldx [%sp + 2047 + 128 + 0x20], %l4 ! chosen device node
158
159 mov (1b - prom_getprop_name), %l1
160 mov (1b - prom_mmu_name), %l2
161 mov (1b - prom_mmu_ihandle_cache), %l5
162 sub %l0, %l1, %l1
163 sub %l0, %l2, %l2
164 sub %l0, %l5, %l5
165
166 /* prom_mmu_ihandle_cache = prom_getint(chosen_node, "mmu") */
167 stx %l1, [%sp + 2047 + 128 + 0x00] ! service, "getprop"
168 mov 4, %l3
169 stx %l3, [%sp + 2047 + 128 + 0x08] ! num_args, 4
170 mov 1, %l3
171 stx %l3, [%sp + 2047 + 128 + 0x10] ! num_rets, 1
172 stx %l4, [%sp + 2047 + 128 + 0x18] ! arg1, chosen_node
173 stx %l2, [%sp + 2047 + 128 + 0x20] ! arg2, "mmu"
174 stx %l5, [%sp + 2047 + 128 + 0x28] ! arg3, &prom_mmu_ihandle_cache
175 mov 4, %l3
176 stx %l3, [%sp + 2047 + 128 + 0x30] ! arg4, sizeof(arg3)
177 stx %g0, [%sp + 2047 + 128 + 0x38] ! ret1
178 call %l7
179 add %sp, (2047 + 128), %o0 ! argument array
180
181 mov (1b - prom_callmethod_name), %l1
182 mov (1b - prom_translate_name), %l2
183 sub %l0, %l1, %l1
184 sub %l0, %l2, %l2
185 lduw [%l5], %l5 ! prom_mmu_ihandle_cache
186
187 stx %l1, [%sp + 2047 + 128 + 0x00] ! service, "call-method"
188 mov 3, %l3
189 stx %l3, [%sp + 2047 + 128 + 0x08] ! num_args, 3
190 mov 5, %l3
191 stx %l3, [%sp + 2047 + 128 + 0x10] ! num_rets, 5
192 stx %l2, [%sp + 2047 + 128 + 0x18] ! arg1: "translate"
193 stx %l5, [%sp + 2047 + 128 + 0x20] ! arg2: prom_mmu_ihandle_cache
194 srlx %l0, 22, %l3
195 sllx %l3, 22, %l3
196 stx %l3, [%sp + 2047 + 128 + 0x28] ! arg3: vaddr, our PC
197 stx %g0, [%sp + 2047 + 128 + 0x30] ! res1
198 stx %g0, [%sp + 2047 + 128 + 0x38] ! res2
199 stx %g0, [%sp + 2047 + 128 + 0x40] ! res3
200 stx %g0, [%sp + 2047 + 128 + 0x48] ! res4
201 stx %g0, [%sp + 2047 + 128 + 0x50] ! res5
202 call %l7
203 add %sp, (2047 + 128), %o0 ! argument array
204
205 ldx [%sp + 2047 + 128 + 0x40], %l1 ! translation mode
206 mov (1b - prom_boot_mapping_mode), %l4
207 sub %l0, %l4, %l4
208 stw %l1, [%l4]
209 mov (1b - prom_boot_mapping_phys_high), %l4
210 sub %l0, %l4, %l4
211 ldx [%sp + 2047 + 128 + 0x48], %l2 ! physaddr high
212 stx %l2, [%l4 + 0x0]
213 ldx [%sp + 2047 + 128 + 0x50], %l3 ! physaddr low
214 stx %l3, [%l4 + 0x8]
215
216 /* Leave service as-is, "call-method" */
217 mov 7, %l3
218 stx %l3, [%sp + 2047 + 128 + 0x08] ! num_args, 7
219 mov 1, %l3
220 stx %l3, [%sp + 2047 + 128 + 0x10] ! num_rets, 1
221 mov (1b - prom_map_name), %l3
222 sub %l0, %l3, %l3
223 stx %l3, [%sp + 2047 + 128 + 0x18] ! arg1: "map"
224 /* Leave arg2 as-is, prom_mmu_ihandle_cache */
225 mov -1, %l3
226 stx %l3, [%sp + 2047 + 128 + 0x28] ! arg3: mode (-1 default)
227 sethi %hi(8 * 1024 * 1024), %l3
228 stx %l3, [%sp + 2047 + 128 + 0x30] ! arg4: size (8MB)
229 sethi %hi(KERNBASE), %l3
230 stx %l3, [%sp + 2047 + 128 + 0x38] ! arg5: vaddr (KERNBASE)
231 stx %g0, [%sp + 2047 + 128 + 0x40] ! arg6: empty
232 mov (1b - prom_boot_mapping_phys_low), %l3
233 sub %l0, %l3, %l3
234 ldx [%l3], %l3
235 stx %l3, [%sp + 2047 + 128 + 0x48] ! arg7: phys addr
236 call %l7
237 add %sp, (2047 + 128), %o0 ! argument array
238
239 add %sp, (192 + 128), %sp
240
241sparc64_boot_after_remap:
92 BRANCH_IF_CHEETAH_BASE(g1,g7,cheetah_boot) 242 BRANCH_IF_CHEETAH_BASE(g1,g7,cheetah_boot)
93 BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g1,g7,cheetah_plus_boot) 243 BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g1,g7,cheetah_plus_boot)
94 ba,pt %xcc, spitfire_boot 244 ba,pt %xcc, spitfire_boot
@@ -125,185 +275,7 @@ cheetah_generic_boot:
125 stxa %g0, [%g3] ASI_IMMU 275 stxa %g0, [%g3] ASI_IMMU
126 membar #Sync 276 membar #Sync
127 277
128 wrpr %g0, (PSTATE_PRIV|PSTATE_PEF|PSTATE_IE), %pstate 278 ba,a,pt %xcc, jump_to_sun4u_init
129 wr %g0, 0, %fprs
130
131 /* Just like for Spitfire, we probe itlb-2 for a mapping which
132 * matches our current %pc. We take the physical address in
133 * that mapping and use it to make our own.
134 */
135
136 /* %g5 holds the tlb data */
137 sethi %uhi(_PAGE_VALID | _PAGE_SZ4MB), %g5
138 sllx %g5, 32, %g5
139 or %g5, (_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W | _PAGE_G), %g5
140
141 /* Put PADDR tlb data mask into %g3. */
142 sethi %uhi(_PAGE_PADDR), %g3
143 or %g3, %ulo(_PAGE_PADDR), %g3
144 sllx %g3, 32, %g3
145 sethi %hi(_PAGE_PADDR), %g7
146 or %g7, %lo(_PAGE_PADDR), %g7
147 or %g3, %g7, %g3
148
149 set 2 << 16, %l0 /* TLB entry walker. */
150 set 0x1fff, %l2 /* Page mask. */
151 rd %pc, %l3
152 andn %l3, %l2, %g2 /* vaddr comparator */
153
1541: ldxa [%l0] ASI_ITLB_TAG_READ, %g1
155 membar #Sync
156 andn %g1, %l2, %g1
157 cmp %g1, %g2
158 be,pn %xcc, cheetah_got_tlbentry
159 nop
160 and %l0, (127 << 3), %g1
161 cmp %g1, (127 << 3)
162 blu,pt %xcc, 1b
163 add %l0, (1 << 3), %l0
164
165 /* Search the small TLB. OBP never maps us like that but
166 * newer SILO can.
167 */
168 clr %l0
169
1701: ldxa [%l0] ASI_ITLB_TAG_READ, %g1
171 membar #Sync
172 andn %g1, %l2, %g1
173 cmp %g1, %g2
174 be,pn %xcc, cheetah_got_tlbentry
175 nop
176 cmp %l0, (15 << 3)
177 blu,pt %xcc, 1b
178 add %l0, (1 << 3), %l0
179
180 /* BUG() if we get here... */
181 ta 0x5
182
183cheetah_got_tlbentry:
184 ldxa [%l0] ASI_ITLB_DATA_ACCESS, %g0
185 ldxa [%l0] ASI_ITLB_DATA_ACCESS, %g1
186 membar #Sync
187 and %g1, %g3, %g1
188 set 0x5fff, %l0
189 andn %g1, %l0, %g1
190 or %g5, %g1, %g5
191
192 /* Clear out any KERNBASE area entries. */
193 set 2 << 16, %l0
194 sethi %hi(KERNBASE), %g3
195 sethi %hi(KERNBASE<<1), %g7
196 mov TLB_TAG_ACCESS, %l7
197
198 /* First, check ITLB */
1991: ldxa [%l0] ASI_ITLB_TAG_READ, %g1
200 membar #Sync
201 andn %g1, %l2, %g1
202 cmp %g1, %g3
203 blu,pn %xcc, 2f
204 cmp %g1, %g7
205 bgeu,pn %xcc, 2f
206 nop
207 stxa %g0, [%l7] ASI_IMMU
208 membar #Sync
209 stxa %g0, [%l0] ASI_ITLB_DATA_ACCESS
210 membar #Sync
211
2122: and %l0, (127 << 3), %g1
213 cmp %g1, (127 << 3)
214 blu,pt %xcc, 1b
215 add %l0, (1 << 3), %l0
216
217 /* Next, check DTLB */
218 set 2 << 16, %l0
2191: ldxa [%l0] ASI_DTLB_TAG_READ, %g1
220 membar #Sync
221 andn %g1, %l2, %g1
222 cmp %g1, %g3
223 blu,pn %xcc, 2f
224 cmp %g1, %g7
225 bgeu,pn %xcc, 2f
226 nop
227 stxa %g0, [%l7] ASI_DMMU
228 membar #Sync
229 stxa %g0, [%l0] ASI_DTLB_DATA_ACCESS
230 membar #Sync
231
2322: and %l0, (511 << 3), %g1
233 cmp %g1, (511 << 3)
234 blu,pt %xcc, 1b
235 add %l0, (1 << 3), %l0
236
237 /* On Cheetah+, have to check second DTLB. */
238 BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g1,l0,2f)
239 ba,pt %xcc, 9f
240 nop
241
2422: set 3 << 16, %l0
2431: ldxa [%l0] ASI_DTLB_TAG_READ, %g1
244 membar #Sync
245 andn %g1, %l2, %g1
246 cmp %g1, %g3
247 blu,pn %xcc, 2f
248 cmp %g1, %g7
249 bgeu,pn %xcc, 2f
250 nop
251 stxa %g0, [%l7] ASI_DMMU
252 membar #Sync
253 stxa %g0, [%l0] ASI_DTLB_DATA_ACCESS
254 membar #Sync
255
2562: and %l0, (511 << 3), %g1
257 cmp %g1, (511 << 3)
258 blu,pt %xcc, 1b
259 add %l0, (1 << 3), %l0
260
2619:
262
263 /* Now lock the TTE we created into ITLB-0 and DTLB-0,
264 * entry 15 (and maybe 14 too).
265 */
266 sethi %hi(KERNBASE), %g3
267 set (0 << 16) | (15 << 3), %g7
268 stxa %g3, [%l7] ASI_DMMU
269 membar #Sync
270 stxa %g5, [%g7] ASI_DTLB_DATA_ACCESS
271 membar #Sync
272 stxa %g3, [%l7] ASI_IMMU
273 membar #Sync
274 stxa %g5, [%g7] ASI_ITLB_DATA_ACCESS
275 membar #Sync
276 flush %g3
277 membar #Sync
278 sethi %hi(_end), %g3 /* Check for bigkernel case */
279 or %g3, %lo(_end), %g3
280 srl %g3, 23, %g3 /* Check if _end > 8M */
281 brz,pt %g3, 1f
282 sethi %hi(KERNBASE), %g3 /* Restore for fixup code below */
283 sethi %hi(0x400000), %g3
284 or %g3, %lo(0x400000), %g3
285 add %g5, %g3, %g5 /* New tte data */
286 andn %g5, (_PAGE_G), %g5
287 sethi %hi(KERNBASE+0x400000), %g3
288 or %g3, %lo(KERNBASE+0x400000), %g3
289 set (0 << 16) | (14 << 3), %g7
290 stxa %g3, [%l7] ASI_DMMU
291 membar #Sync
292 stxa %g5, [%g7] ASI_DTLB_DATA_ACCESS
293 membar #Sync
294 stxa %g3, [%l7] ASI_IMMU
295 membar #Sync
296 stxa %g5, [%g7] ASI_ITLB_DATA_ACCESS
297 membar #Sync
298 flush %g3
299 membar #Sync
300 sethi %hi(KERNBASE), %g3 /* Restore for fixup code below */
301 ba,pt %xcc, 1f
302 nop
303
3041: set sun4u_init, %g2
305 jmpl %g2 + %g0, %g0
306 nop
307 279
308spitfire_boot: 280spitfire_boot:
309 /* Typically PROM has already enabled both MMU's and both on-chip 281 /* Typically PROM has already enabled both MMU's and both on-chip
@@ -313,6 +285,7 @@ spitfire_boot:
313 stxa %g1, [%g0] ASI_LSU_CONTROL 285 stxa %g1, [%g0] ASI_LSU_CONTROL
314 membar #Sync 286 membar #Sync
315 287
288jump_to_sun4u_init:
316 /* 289 /*
317 * Make sure we are in privileged mode, have address masking, 290 * Make sure we are in privileged mode, have address masking,
318 * using the ordinary globals and have enabled floating 291 * using the ordinary globals and have enabled floating
@@ -324,151 +297,6 @@ spitfire_boot:
324 wrpr %g0, (PSTATE_PRIV|PSTATE_PEF|PSTATE_IE), %pstate 297 wrpr %g0, (PSTATE_PRIV|PSTATE_PEF|PSTATE_IE), %pstate
325 wr %g0, 0, %fprs 298 wr %g0, 0, %fprs
326 299
327spitfire_create_mappings:
328 /* %g5 holds the tlb data */
329 sethi %uhi(_PAGE_VALID | _PAGE_SZ4MB), %g5
330 sllx %g5, 32, %g5
331 or %g5, (_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W | _PAGE_G), %g5
332
333 /* Base of physical memory cannot reliably be assumed to be
334 * at 0x0! Figure out where it happens to be. -DaveM
335 */
336
337 /* Put PADDR tlb data mask into %g3. */
338 sethi %uhi(_PAGE_PADDR_SF), %g3
339 or %g3, %ulo(_PAGE_PADDR_SF), %g3
340 sllx %g3, 32, %g3
341 sethi %hi(_PAGE_PADDR_SF), %g7
342 or %g7, %lo(_PAGE_PADDR_SF), %g7
343 or %g3, %g7, %g3
344
345 /* Walk through entire ITLB, looking for entry which maps
346 * our %pc currently, stick PADDR from there into %g5 tlb data.
347 */
348 clr %l0 /* TLB entry walker. */
349 set 0x1fff, %l2 /* Page mask. */
350 rd %pc, %l3
351 andn %l3, %l2, %g2 /* vaddr comparator */
3521:
353 /* Yes, the nops seem to be necessary for now, don't ask me why. -DaveM */
354 ldxa [%l0] ASI_ITLB_TAG_READ, %g1
355 nop
356 nop
357 nop
358 andn %g1, %l2, %g1 /* Get vaddr */
359 cmp %g1, %g2
360 be,a,pn %xcc, spitfire_got_tlbentry
361 ldxa [%l0] ASI_ITLB_DATA_ACCESS, %g1
362 cmp %l0, (63 << 3)
363 blu,pt %xcc, 1b
364 add %l0, (1 << 3), %l0
365
366 /* BUG() if we get here... */
367 ta 0x5
368
369spitfire_got_tlbentry:
370 /* Nops here again, perhaps Cheetah/Blackbird are better behaved... */
371 nop
372 nop
373 nop
374 and %g1, %g3, %g1 /* Mask to just get paddr bits. */
375 set 0x5fff, %l3 /* Mask offset to get phys base. */
376 andn %g1, %l3, %g1
377
378 /* NOTE: We hold on to %g1 paddr base as we need it below to lock
379 * NOTE: the PROM cif code into the TLB.
380 */
381
382 or %g5, %g1, %g5 /* Or it into TAG being built. */
383
384 clr %l0 /* TLB entry walker. */
385 sethi %hi(KERNBASE), %g3 /* 4M lower limit */
386 sethi %hi(KERNBASE<<1), %g7 /* 8M upper limit */
387 mov TLB_TAG_ACCESS, %l7
3881:
389 /* Yes, the nops seem to be necessary for now, don't ask me why. -DaveM */
390 ldxa [%l0] ASI_ITLB_TAG_READ, %g1
391 nop
392 nop
393 nop
394 andn %g1, %l2, %g1 /* Get vaddr */
395 cmp %g1, %g3
396 blu,pn %xcc, 2f
397 cmp %g1, %g7
398 bgeu,pn %xcc, 2f
399 nop
400 stxa %g0, [%l7] ASI_IMMU
401 stxa %g0, [%l0] ASI_ITLB_DATA_ACCESS
402 membar #Sync
4032:
404 cmp %l0, (63 << 3)
405 blu,pt %xcc, 1b
406 add %l0, (1 << 3), %l0
407
408 nop; nop; nop
409
410 clr %l0 /* TLB entry walker. */
4111:
412 /* Yes, the nops seem to be necessary for now, don't ask me why. -DaveM */
413 ldxa [%l0] ASI_DTLB_TAG_READ, %g1
414 nop
415 nop
416 nop
417 andn %g1, %l2, %g1 /* Get vaddr */
418 cmp %g1, %g3
419 blu,pn %xcc, 2f
420 cmp %g1, %g7
421 bgeu,pn %xcc, 2f
422 nop
423 stxa %g0, [%l7] ASI_DMMU
424 stxa %g0, [%l0] ASI_DTLB_DATA_ACCESS
425 membar #Sync
4262:
427 cmp %l0, (63 << 3)
428 blu,pt %xcc, 1b
429 add %l0, (1 << 3), %l0
430
431 nop; nop; nop
432
433
434 /* PROM never puts any TLB entries into the MMU with the lock bit
435 * set. So we gladly use tlb entry 63 for KERNBASE. And maybe 62 too.
436 */
437
438 sethi %hi(KERNBASE), %g3
439 mov (63 << 3), %g7
440 stxa %g3, [%l7] ASI_DMMU /* KERNBASE into TLB TAG */
441 stxa %g5, [%g7] ASI_DTLB_DATA_ACCESS /* TTE into TLB DATA */
442 membar #Sync
443 stxa %g3, [%l7] ASI_IMMU /* KERNBASE into TLB TAG */
444 stxa %g5, [%g7] ASI_ITLB_DATA_ACCESS /* TTE into TLB DATA */
445 membar #Sync
446 flush %g3
447 membar #Sync
448 sethi %hi(_end), %g3 /* Check for bigkernel case */
449 or %g3, %lo(_end), %g3
450 srl %g3, 23, %g3 /* Check if _end > 8M */
451 brz,pt %g3, 2f
452 sethi %hi(KERNBASE), %g3 /* Restore for fixup code below */
453 sethi %hi(0x400000), %g3
454 or %g3, %lo(0x400000), %g3
455 add %g5, %g3, %g5 /* New tte data */
456 andn %g5, (_PAGE_G), %g5
457 sethi %hi(KERNBASE+0x400000), %g3
458 or %g3, %lo(KERNBASE+0x400000), %g3
459 mov (62 << 3), %g7
460 stxa %g3, [%l7] ASI_DMMU
461 stxa %g5, [%g7] ASI_DTLB_DATA_ACCESS
462 membar #Sync
463 stxa %g3, [%l7] ASI_IMMU
464 stxa %g5, [%g7] ASI_ITLB_DATA_ACCESS
465 membar #Sync
466 flush %g3
467 membar #Sync
468 sethi %hi(KERNBASE), %g3 /* Restore for fixup code below */
4692: ba,pt %xcc, 1f
470 nop
4711:
472 set sun4u_init, %g2 300 set sun4u_init, %g2
473 jmpl %g2 + %g0, %g0 301 jmpl %g2 + %g0, %g0
474 nop 302 nop
@@ -483,38 +311,12 @@ sun4u_init:
483 stxa %g0, [%g7] ASI_DMMU 311 stxa %g0, [%g7] ASI_DMMU
484 membar #Sync 312 membar #Sync
485 313
486 /* We are now safely (we hope) in Nucleus context (0), rewrite
487 * the KERNBASE TTE's so they no longer have the global bit set.
488 * Don't forget to setup TAG_ACCESS first 8-)
489 */
490 mov TLB_TAG_ACCESS, %g2
491 stxa %g3, [%g2] ASI_IMMU
492 stxa %g3, [%g2] ASI_DMMU
493 membar #Sync
494
495 BRANCH_IF_ANY_CHEETAH(g1,g7,cheetah_tlb_fixup) 314 BRANCH_IF_ANY_CHEETAH(g1,g7,cheetah_tlb_fixup)
496 315
497 ba,pt %xcc, spitfire_tlb_fixup 316 ba,pt %xcc, spitfire_tlb_fixup
498 nop 317 nop
499 318
500cheetah_tlb_fixup: 319cheetah_tlb_fixup:
501 set (0 << 16) | (15 << 3), %g7
502 ldxa [%g7] ASI_ITLB_DATA_ACCESS, %g0
503 ldxa [%g7] ASI_ITLB_DATA_ACCESS, %g1
504 andn %g1, (_PAGE_G), %g1
505 stxa %g1, [%g7] ASI_ITLB_DATA_ACCESS
506 membar #Sync
507
508 ldxa [%g7] ASI_DTLB_DATA_ACCESS, %g0
509 ldxa [%g7] ASI_DTLB_DATA_ACCESS, %g1
510 andn %g1, (_PAGE_G), %g1
511 stxa %g1, [%g7] ASI_DTLB_DATA_ACCESS
512 membar #Sync
513
514 /* Kill instruction prefetch queues. */
515 flush %g3
516 membar #Sync
517
518 mov 2, %g2 /* Set TLB type to cheetah+. */ 320 mov 2, %g2 /* Set TLB type to cheetah+. */
519 BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g1,g7,1f) 321 BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g1,g7,1f)
520 322
@@ -551,21 +353,6 @@ cheetah_tlb_fixup:
551 nop 353 nop
552 354
553spitfire_tlb_fixup: 355spitfire_tlb_fixup:
554 mov (63 << 3), %g7
555 ldxa [%g7] ASI_ITLB_DATA_ACCESS, %g1
556 andn %g1, (_PAGE_G), %g1
557 stxa %g1, [%g7] ASI_ITLB_DATA_ACCESS
558 membar #Sync
559
560 ldxa [%g7] ASI_DTLB_DATA_ACCESS, %g1
561 andn %g1, (_PAGE_G), %g1
562 stxa %g1, [%g7] ASI_DTLB_DATA_ACCESS
563 membar #Sync
564
565 /* Kill instruction prefetch queues. */
566 flush %g3
567 membar #Sync
568
569 /* Set TLB type to spitfire. */ 356 /* Set TLB type to spitfire. */
570 mov 0, %g2 357 mov 0, %g2
571 sethi %hi(tlb_type), %g1 358 sethi %hi(tlb_type), %g1
@@ -578,24 +365,6 @@ tlb_fixup_done:
578 mov %sp, %l6 365 mov %sp, %l6
579 mov %o4, %l7 366 mov %o4, %l7
580 367
581#if 0 /* We don't do it like this anymore, but for historical hack value
582 * I leave this snippet here to show how crazy we can be sometimes. 8-)
583 */
584
585 /* Setup "Linux Current Register", thanks Sun 8-) */
586 wr %g0, 0x1, %pcr
587
588 /* Blackbird errata workaround. See commentary in
589 * smp.c:smp_percpu_timer_interrupt() for more
590 * information.
591 */
592 ba,pt %xcc, 99f
593 nop
594 .align 64
59599: wr %g6, %g0, %pic
596 rd %pic, %g0
597#endif
598
599 wr %g0, ASI_P, %asi 368 wr %g0, ASI_P, %asi
600 mov 1, %g1 369 mov 1, %g1
601 sllx %g1, THREAD_SHIFT, %g1 370 sllx %g1, THREAD_SHIFT, %g1
@@ -756,12 +525,7 @@ bootup_user_stack_end:
756 525
757#include "ttable.S" 526#include "ttable.S"
758#include "systbls.S" 527#include "systbls.S"
759 528#include "ktlb.S"
760 .align 1024
761 .globl swapper_pg_dir
762swapper_pg_dir:
763 .word 0
764
765#include "etrap.S" 529#include "etrap.S"
766#include "rtrap.S" 530#include "rtrap.S"
767#include "winfixup.S" 531#include "winfixup.S"
@@ -776,8 +540,11 @@ swapper_pg_dir:
776prom_tba: .xword 0 540prom_tba: .xword 0
777tlb_type: .word 0 /* Must NOT end up in BSS */ 541tlb_type: .word 0 /* Must NOT end up in BSS */
778 .section ".fixup",#alloc,#execinstr 542 .section ".fixup",#alloc,#execinstr
779 .globl __ret_efault 543
544 .globl __ret_efault, __retl_efault
780__ret_efault: 545__ret_efault:
781 ret 546 ret
782 restore %g0, -EFAULT, %o0 547 restore %g0, -EFAULT, %o0
783 548__retl_efault:
549 retl
550 mov -EFAULT, %o0
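
The new boot path drives the PROM through its client interface by building an argument array on the stack: a pointer to the service name at offset 0x00, the argument and return counts at 0x08 and 0x10, then the argument and return cells. The struct below only illustrates that 64-bit cell layout as it appears in the code above; it is not a kernel or OpenBoot header.

#include <stdio.h>
#include <stdint.h>

struct cif_call {
	const char *service;	/* "finddevice", "getprop", "call-method", ... */
	uint64_t num_args;
	uint64_t num_rets;
	uint64_t cells[16];	/* num_args argument cells, then num_rets return cells */
};

int main(void)
{
	/* Mirrors the first call above: chosen_node = finddevice("/chosen") */
	struct cif_call call = { "finddevice", 1, 1, { 0 } };

	call.cells[0] = (uint64_t)(uintptr_t)"/chosen";	/* arg1 */
	call.cells[1] = 0;				/* ret1, filled in by the PROM */

	printf("%s: %u args, %u rets\n", call.service,
	       (unsigned)call.num_args, (unsigned)call.num_rets);
	return 0;
}
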
diff --git a/arch/sparc64/kernel/ktlb.S b/arch/sparc64/kernel/ktlb.S
new file mode 100644
index 000000000000..7796b37f478c
--- /dev/null
+++ b/arch/sparc64/kernel/ktlb.S
@@ -0,0 +1,198 @@
1/* arch/sparc64/kernel/ktlb.S: Kernel mapping TLB miss handling.
2 *
3 * Copyright (C) 1995, 1997, 2005 David S. Miller <davem@davemloft.net>
4 * Copyright (C) 1996 Eddie C. Dost (ecd@brainaid.de)
5 * Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx)
6 * Copyright (C) 1996,98,99 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
7*/
8
9#include <linux/config.h>
10#include <asm/head.h>
11#include <asm/asi.h>
12#include <asm/page.h>
13#include <asm/pgtable.h>
14
15 .text
16 .align 32
17
18/*
19 * On a second level vpte miss, check whether the original fault is to the OBP
20 * range (note that this is only possible for instruction miss, data misses to
21 * obp range do not use vpte). If so, go back directly to the faulting address.
22 * This is because we want to read the tpc, otherwise we have no way of knowing
23 * the 8k aligned faulting address if we are using >8k kernel pagesize. This
24 * also ensures no vpte range addresses are dropped into tlb while obp is
25 * executing (see inherit_locked_prom_mappings() rant).
26 */
27sparc64_vpte_nucleus:
28 /* Note that kvmap below has verified that the address is
29 * in the range MODULES_VADDR --> VMALLOC_END already. So
30 * here we need only check if it is an OBP address or not.
31 */
32 sethi %hi(LOW_OBP_ADDRESS), %g5
33 cmp %g4, %g5
34 blu,pn %xcc, kern_vpte
35 mov 0x1, %g5
36 sllx %g5, 32, %g5
37 cmp %g4, %g5
38 blu,pn %xcc, vpte_insn_obp
39 nop
40
 41	/* These two instructions are patched by paging_init(). */
42kern_vpte:
43 sethi %hi(swapper_pgd_zero), %g5
44 lduw [%g5 + %lo(swapper_pgd_zero)], %g5
45
46 /* With kernel PGD in %g5, branch back into dtlb_backend. */
47 ba,pt %xcc, sparc64_kpte_continue
48 andn %g1, 0x3, %g1 /* Finish PMD offset adjustment. */
49
50vpte_noent:
51 /* Restore previous TAG_ACCESS, %g5 is zero, and we will
52 * skip over the trap instruction so that the top level
 53	 * TLB miss handler will think this %g5 value is just an
54 * invalid PTE, thus branching to full fault processing.
55 */
56 mov TLB_SFSR, %g1
57 stxa %g4, [%g1 + %g1] ASI_DMMU
58 done
59
60vpte_insn_obp:
61 sethi %hi(prom_pmd_phys), %g5
62 ldx [%g5 + %lo(prom_pmd_phys)], %g5
63
64 /* Behave as if we are at TL0. */
65 wrpr %g0, 1, %tl
66 rdpr %tpc, %g4 /* Find original faulting iaddr */
67 srlx %g4, 13, %g4 /* Throw out context bits */
68 sllx %g4, 13, %g4 /* g4 has vpn + ctx0 now */
69
70 /* Restore previous TAG_ACCESS. */
71 mov TLB_SFSR, %g1
72 stxa %g4, [%g1 + %g1] ASI_IMMU
73
74 /* Get PMD offset. */
75 srlx %g4, 23, %g6
76 and %g6, 0x7ff, %g6
77 sllx %g6, 2, %g6
78
79 /* Load PMD, is it valid? */
80 lduwa [%g5 + %g6] ASI_PHYS_USE_EC, %g5
81 brz,pn %g5, longpath
82 sllx %g5, 11, %g5
83
84 /* Get PTE offset. */
85 srlx %g4, 13, %g6
86 and %g6, 0x3ff, %g6
87 sllx %g6, 3, %g6
88
89 /* Load PTE. */
90 ldxa [%g5 + %g6] ASI_PHYS_USE_EC, %g5
91 brgez,pn %g5, longpath
92 nop
93
94 /* TLB load and return from trap. */
95 stxa %g5, [%g0] ASI_ITLB_DATA_IN
96 retry
97
98kvmap_do_obp:
99 sethi %hi(prom_pmd_phys), %g5
100 ldx [%g5 + %lo(prom_pmd_phys)], %g5
101
102 /* Get PMD offset. */
103 srlx %g4, 23, %g6
104 and %g6, 0x7ff, %g6
105 sllx %g6, 2, %g6
106
107 /* Load PMD, is it valid? */
108 lduwa [%g5 + %g6] ASI_PHYS_USE_EC, %g5
109 brz,pn %g5, longpath
110 sllx %g5, 11, %g5
111
112 /* Get PTE offset. */
113 srlx %g4, 13, %g6
114 and %g6, 0x3ff, %g6
115 sllx %g6, 3, %g6
116
117 /* Load PTE. */
118 ldxa [%g5 + %g6] ASI_PHYS_USE_EC, %g5
119 brgez,pn %g5, longpath
120 nop
121
122 /* TLB load and return from trap. */
123 stxa %g5, [%g0] ASI_DTLB_DATA_IN
124 retry
125
126/*
127 * On a first level data miss, check whether this is to the OBP range (note
128 * that such accesses can be made by prom, as well as by kernel using
129 * prom_getproperty on "address"), and if so, do not use vpte access ...
130 * rather, use information saved during inherit_prom_mappings() using 8k
131 * pagesize.
132 */
133 .align 32
134kvmap:
135 brgez,pn %g4, kvmap_nonlinear
136 nop
137
138#ifdef CONFIG_DEBUG_PAGEALLOC
139 .globl kvmap_linear_patch
140kvmap_linear_patch:
141#endif
142 ba,pt %xcc, kvmap_load
143 xor %g2, %g4, %g5
144
145#ifdef CONFIG_DEBUG_PAGEALLOC
146 sethi %hi(swapper_pg_dir), %g5
147 or %g5, %lo(swapper_pg_dir), %g5
148 sllx %g4, 64 - (PGDIR_SHIFT + PGDIR_BITS), %g6
149 srlx %g6, 64 - PAGE_SHIFT, %g6
150 andn %g6, 0x3, %g6
151 lduw [%g5 + %g6], %g5
152 brz,pn %g5, longpath
153 sllx %g4, 64 - (PMD_SHIFT + PMD_BITS), %g6
154 srlx %g6, 64 - PAGE_SHIFT, %g6
155 sllx %g5, 11, %g5
156 andn %g6, 0x3, %g6
157 lduwa [%g5 + %g6] ASI_PHYS_USE_EC, %g5
158 brz,pn %g5, longpath
159 sllx %g4, 64 - PMD_SHIFT, %g6
160 srlx %g6, 64 - PAGE_SHIFT, %g6
161 sllx %g5, 11, %g5
162 andn %g6, 0x7, %g6
163 ldxa [%g5 + %g6] ASI_PHYS_USE_EC, %g5
164 brz,pn %g5, longpath
165 nop
166 ba,a,pt %xcc, kvmap_load
167#endif
168
169kvmap_nonlinear:
170 sethi %hi(MODULES_VADDR), %g5
171 cmp %g4, %g5
172 blu,pn %xcc, longpath
173 mov (VMALLOC_END >> 24), %g5
174 sllx %g5, 24, %g5
175 cmp %g4, %g5
176 bgeu,pn %xcc, longpath
177 nop
178
179kvmap_check_obp:
180 sethi %hi(LOW_OBP_ADDRESS), %g5
181 cmp %g4, %g5
182 blu,pn %xcc, kvmap_vmalloc_addr
183 mov 0x1, %g5
184 sllx %g5, 32, %g5
185 cmp %g4, %g5
186 blu,pn %xcc, kvmap_do_obp
187 nop
188
189kvmap_vmalloc_addr:
190 /* If we get here, a vmalloc addr was accessed, load kernel VPTE. */
191 ldxa [%g3 + %g6] ASI_N, %g5
192 brgez,pn %g5, longpath
193 nop
194
195kvmap_load:
196 /* PTE is valid, load into TLB and return from trap. */
197 stxa %g5, [%g0] ASI_DTLB_DATA_IN ! Reload TLB
198 retry
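
kvmap above classifies a faulting kernel virtual address in three steps: addresses with the top bit set belong to the linear mapping (the xor with %g2 turns them straight into a PTE), addresses below MODULES_VADDR or at or above VMALLOC_END take the fault path, and within the module/vmalloc window the OBP range is peeled off before the VPTE lookup. A hedged C sketch of that decision tree; the constants are placeholders, not the real pgtable.h values:

#include <stdio.h>
#include <stdint.h>

/* Placeholder layout constants, for illustration only. */
#define MODULES_VADDR_X   UINT64_C(0x0000000001000000)
#define LOW_OBP_ADDRESS_X UINT64_C(0x00000000f0000000)
#define HI_OBP_ADDRESS_X  UINT64_C(0x0000000100000000)
#define VMALLOC_END_X     UINT64_C(0x0000000200000000)

static const char *classify(uint64_t va)
{
	if ((int64_t)va < 0)
		return "linear mapping (kvmap_load)";
	if (va < MODULES_VADDR_X || va >= VMALLOC_END_X)
		return "fault (longpath)";
	if (va >= LOW_OBP_ADDRESS_X && va < HI_OBP_ADDRESS_X)
		return "OBP mapping (kvmap_do_obp)";
	return "modules/vmalloc (kvmap_vmalloc_addr)";
}

int main(void)
{
	printf("%s\n", classify(UINT64_C(0xfffff80000000000)));
	printf("%s\n", classify(UINT64_C(0x00000000f0001000)));
	printf("%s\n", classify(UINT64_C(0x0000000001234000)));
	return 0;
}
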
diff --git a/arch/sparc64/kernel/pci_schizo.c b/arch/sparc64/kernel/pci_schizo.c
index 331382e1a75d..cae5b61fe2f0 100644
--- a/arch/sparc64/kernel/pci_schizo.c
+++ b/arch/sparc64/kernel/pci_schizo.c
@@ -330,7 +330,7 @@ static int schizo_ino_to_pil(struct pci_dev *pdev, unsigned int ino)
330static void tomatillo_wsync_handler(struct ino_bucket *bucket, void *_arg1, void *_arg2) 330static void tomatillo_wsync_handler(struct ino_bucket *bucket, void *_arg1, void *_arg2)
331{ 331{
332 unsigned long sync_reg = (unsigned long) _arg2; 332 unsigned long sync_reg = (unsigned long) _arg2;
333 u64 mask = 1 << (__irq_ino(__irq(bucket)) & IMAP_INO); 333 u64 mask = 1UL << (__irq_ino(__irq(bucket)) & IMAP_INO);
334 u64 val; 334 u64 val;
335 int limit; 335 int limit;
336 336
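
The 1UL in the hunk above matters because the masked INO can exceed 31: with a plain int constant the shift is done in 32-bit arithmetic, so bits 32-63 can never be set (and the shift itself is undefined for those counts). A standalone demonstration of the corrected 64-bit form; UINT64_C(1) plays the role that 1UL plays on sparc64:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Shift counts straddling 32, as masked INO values can. */
	for (unsigned int ino = 30; ino <= 34; ino++) {
		uint64_t mask = UINT64_C(1) << ino;
		printf("ino=%2u mask=%#018llx\n",
		       ino, (unsigned long long)mask);
	}
	return 0;
}
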
diff --git a/arch/sparc64/kernel/ptrace.c b/arch/sparc64/kernel/ptrace.c
index 5efbff90d668..774ecbb8a031 100644
--- a/arch/sparc64/kernel/ptrace.c
+++ b/arch/sparc64/kernel/ptrace.c
@@ -31,6 +31,7 @@
31#include <asm/visasm.h> 31#include <asm/visasm.h>
32#include <asm/spitfire.h> 32#include <asm/spitfire.h>
33#include <asm/page.h> 33#include <asm/page.h>
34#include <asm/cpudata.h>
34 35
35/* Returning from ptrace is a bit tricky because the syscall return 36/* Returning from ptrace is a bit tricky because the syscall return
36 * low level code assumes any value returned which is negative and 37 * low level code assumes any value returned which is negative and
@@ -132,12 +133,16 @@ void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
132 if ((uaddr ^ (unsigned long) kaddr) & (1UL << 13)) { 133 if ((uaddr ^ (unsigned long) kaddr) & (1UL << 13)) {
133 unsigned long start = __pa(kaddr); 134 unsigned long start = __pa(kaddr);
134 unsigned long end = start + len; 135 unsigned long end = start + len;
136 unsigned long dcache_line_size;
137
138 dcache_line_size = local_cpu_data().dcache_line_size;
135 139
136 if (tlb_type == spitfire) { 140 if (tlb_type == spitfire) {
137 for (; start < end; start += 32) 141 for (; start < end; start += dcache_line_size)
138 spitfire_put_dcache_tag(start & 0x3fe0, 0x0); 142 spitfire_put_dcache_tag(start & 0x3fe0, 0x0);
139 } else { 143 } else {
140 for (; start < end; start += 32) 144 start &= ~(dcache_line_size - 1);
145 for (; start < end; start += dcache_line_size)
141 __asm__ __volatile__( 146 __asm__ __volatile__(
142 "stxa %%g0, [%0] %1\n\t" 147 "stxa %%g0, [%0] %1\n\t"
143 "membar #Sync" 148 "membar #Sync"
@@ -150,8 +155,11 @@ void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
150 if (write && tlb_type == spitfire) { 155 if (write && tlb_type == spitfire) {
151 unsigned long start = (unsigned long) kaddr; 156 unsigned long start = (unsigned long) kaddr;
152 unsigned long end = start + len; 157 unsigned long end = start + len;
158 unsigned long icache_line_size;
159
160 icache_line_size = local_cpu_data().icache_line_size;
153 161
154 for (; start < end; start += 32) 162 for (; start < end; start += icache_line_size)
155 flushi(start); 163 flushi(start);
156 } 164 }
157} 165}
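
The rewritten loops read the real D-cache and I-cache line sizes from cpu_data instead of hard-coding 32 bytes, and the cheetah branch also aligns the start address down to a line boundary before stepping. A small standalone sketch of that loop shape; flush_line() merely stands in for the stxa/flushi instruction in the real code:

#include <stdio.h>

static void flush_line(unsigned long addr)
{
	printf("flush line at %#lx\n", addr);
}

static void flush_range(unsigned long start, unsigned long len,
			unsigned long line_size)
{
	unsigned long end = start + len;

	start &= ~(line_size - 1);	/* line_size is a power of two */
	for (; start < end; start += line_size)
		flush_line(start);
}

int main(void)
{
	flush_range(0x10007, 0x40, 32);	/* flushes lines 0x10000 .. 0x10040 */
	return 0;
}
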
diff --git a/arch/sparc64/kernel/setup.c b/arch/sparc64/kernel/setup.c
index ddbed3341a23..4c9c8f241748 100644
--- a/arch/sparc64/kernel/setup.c
+++ b/arch/sparc64/kernel/setup.c
@@ -464,8 +464,6 @@ static void __init boot_flags_init(char *commands)
464 } 464 }
465} 465}
466 466
467extern int prom_probe_memory(void);
468extern unsigned long start, end;
469extern void panic_setup(char *, int *); 467extern void panic_setup(char *, int *);
470 468
471extern unsigned short root_flags; 469extern unsigned short root_flags;
@@ -492,13 +490,8 @@ void register_prom_callbacks(void)
492 "' linux-.soft2 to .soft2"); 490 "' linux-.soft2 to .soft2");
493} 491}
494 492
495extern void paging_init(void);
496
497void __init setup_arch(char **cmdline_p) 493void __init setup_arch(char **cmdline_p)
498{ 494{
499 unsigned long highest_paddr;
500 int i;
501
502 /* Initialize PROM console and command line. */ 495 /* Initialize PROM console and command line. */
503 *cmdline_p = prom_getbootargs(); 496 *cmdline_p = prom_getbootargs();
504 strcpy(saved_command_line, *cmdline_p); 497 strcpy(saved_command_line, *cmdline_p);
@@ -517,40 +510,6 @@ void __init setup_arch(char **cmdline_p)
517 boot_flags_init(*cmdline_p); 510 boot_flags_init(*cmdline_p);
518 511
519 idprom_init(); 512 idprom_init();
520 (void) prom_probe_memory();
521
522 /* In paging_init() we tip off this value to see if we need
523 * to change init_mm.pgd to point to the real alias mapping.
524 */
525 phys_base = 0xffffffffffffffffUL;
526 highest_paddr = 0UL;
527 for (i = 0; sp_banks[i].num_bytes != 0; i++) {
528 unsigned long top;
529
530 if (sp_banks[i].base_addr < phys_base)
531 phys_base = sp_banks[i].base_addr;
532 top = sp_banks[i].base_addr +
533 sp_banks[i].num_bytes;
534 if (highest_paddr < top)
535 highest_paddr = top;
536 }
537 pfn_base = phys_base >> PAGE_SHIFT;
538
539 switch (tlb_type) {
540 default:
541 case spitfire:
542 kern_base = spitfire_get_itlb_data(sparc64_highest_locked_tlbent());
543 kern_base &= _PAGE_PADDR_SF;
544 break;
545
546 case cheetah:
547 case cheetah_plus:
548 kern_base = cheetah_get_litlb_data(sparc64_highest_locked_tlbent());
549 kern_base &= _PAGE_PADDR;
550 break;
551 };
552
553 kern_size = (unsigned long)&_end - (unsigned long)KERNBASE;
554 513
555 if (!root_flags) 514 if (!root_flags)
556 root_mountflags &= ~MS_RDONLY; 515 root_mountflags &= ~MS_RDONLY;
@@ -625,6 +584,9 @@ extern void smp_info(struct seq_file *);
625extern void smp_bogo(struct seq_file *); 584extern void smp_bogo(struct seq_file *);
626extern void mmu_info(struct seq_file *); 585extern void mmu_info(struct seq_file *);
627 586
587unsigned int dcache_parity_tl1_occurred;
588unsigned int icache_parity_tl1_occurred;
589
628static int show_cpuinfo(struct seq_file *m, void *__unused) 590static int show_cpuinfo(struct seq_file *m, void *__unused)
629{ 591{
630 seq_printf(m, 592 seq_printf(m,
@@ -635,6 +597,8 @@ static int show_cpuinfo(struct seq_file *m, void *__unused)
635 "type\t\t: sun4u\n" 597 "type\t\t: sun4u\n"
636 "ncpus probed\t: %ld\n" 598 "ncpus probed\t: %ld\n"
637 "ncpus active\t: %ld\n" 599 "ncpus active\t: %ld\n"
600 "D$ parity tl1\t: %u\n"
601 "I$ parity tl1\t: %u\n"
638#ifndef CONFIG_SMP 602#ifndef CONFIG_SMP
639 "Cpu0Bogo\t: %lu.%02lu\n" 603 "Cpu0Bogo\t: %lu.%02lu\n"
640 "Cpu0ClkTck\t: %016lx\n" 604 "Cpu0ClkTck\t: %016lx\n"
@@ -647,7 +611,9 @@ static int show_cpuinfo(struct seq_file *m, void *__unused)
647 (prom_prev >> 8) & 0xff, 611 (prom_prev >> 8) & 0xff,
648 prom_prev & 0xff, 612 prom_prev & 0xff,
649 (long)num_possible_cpus(), 613 (long)num_possible_cpus(),
650 (long)num_online_cpus() 614 (long)num_online_cpus(),
615 dcache_parity_tl1_occurred,
616 icache_parity_tl1_occurred
651#ifndef CONFIG_SMP 617#ifndef CONFIG_SMP
652 , cpu_data(0).udelay_val/(500000/HZ), 618 , cpu_data(0).udelay_val/(500000/HZ),
653 (cpu_data(0).udelay_val/(5000/HZ)) % 100, 619 (cpu_data(0).udelay_val/(5000/HZ)) % 100,
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index b4fc6a5462b2..590df5a16f5a 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -93,6 +93,27 @@ void __init smp_store_cpu_info(int id)
93 cpu_data(id).pte_cache[1] = NULL; 93 cpu_data(id).pte_cache[1] = NULL;
94 cpu_data(id).pgd_cache = NULL; 94 cpu_data(id).pgd_cache = NULL;
95 cpu_data(id).idle_volume = 1; 95 cpu_data(id).idle_volume = 1;
96
97 cpu_data(id).dcache_size = prom_getintdefault(cpu_node, "dcache-size",
98 16 * 1024);
99 cpu_data(id).dcache_line_size =
100 prom_getintdefault(cpu_node, "dcache-line-size", 32);
101 cpu_data(id).icache_size = prom_getintdefault(cpu_node, "icache-size",
102 16 * 1024);
103 cpu_data(id).icache_line_size =
104 prom_getintdefault(cpu_node, "icache-line-size", 32);
105 cpu_data(id).ecache_size = prom_getintdefault(cpu_node, "ecache-size",
106 4 * 1024 * 1024);
107 cpu_data(id).ecache_line_size =
108 prom_getintdefault(cpu_node, "ecache-line-size", 64);
109 printk("CPU[%d]: Caches "
110 "D[sz(%d):line_sz(%d)] "
111 "I[sz(%d):line_sz(%d)] "
112 "E[sz(%d):line_sz(%d)]\n",
113 id,
114 cpu_data(id).dcache_size, cpu_data(id).dcache_line_size,
115 cpu_data(id).icache_size, cpu_data(id).icache_line_size,
116 cpu_data(id).ecache_size, cpu_data(id).ecache_line_size);
96} 117}
97 118
98static void smp_setup_percpu_timer(void); 119static void smp_setup_percpu_timer(void);
diff --git a/arch/sparc64/kernel/sys32.S b/arch/sparc64/kernel/sys32.S
index 5f9e4fae612e..9cd272ac3ac1 100644
--- a/arch/sparc64/kernel/sys32.S
+++ b/arch/sparc64/kernel/sys32.S
@@ -157,173 +157,199 @@ sys32_socketcall: /* %o0=call, %o1=args */
157 or %g2, %lo(__socketcall_table_begin), %g2 157 or %g2, %lo(__socketcall_table_begin), %g2
158 jmpl %g2 + %o0, %g0 158 jmpl %g2 + %o0, %g0
159 nop 159 nop
160do_einval:
161 retl
162 mov -EINVAL, %o0
160 163
161 /* Each entry is exactly 32 bytes. */
162 .align 32 164 .align 32
163__socketcall_table_begin: 165__socketcall_table_begin:
166
167 /* Each entry is exactly 32 bytes. */
164do_sys_socket: /* sys_socket(int, int, int) */ 168do_sys_socket: /* sys_socket(int, int, int) */
165 ldswa [%o1 + 0x0] %asi, %o0 1691: ldswa [%o1 + 0x0] %asi, %o0
166 sethi %hi(sys_socket), %g1 170 sethi %hi(sys_socket), %g1
167 ldswa [%o1 + 0x8] %asi, %o2 1712: ldswa [%o1 + 0x8] %asi, %o2
168 jmpl %g1 + %lo(sys_socket), %g0 172 jmpl %g1 + %lo(sys_socket), %g0
169 ldswa [%o1 + 0x4] %asi, %o1 1733: ldswa [%o1 + 0x4] %asi, %o1
170 nop 174 nop
171 nop 175 nop
172 nop 176 nop
173do_sys_bind: /* sys_bind(int fd, struct sockaddr *, int) */ 177do_sys_bind: /* sys_bind(int fd, struct sockaddr *, int) */
174 ldswa [%o1 + 0x0] %asi, %o0 1784: ldswa [%o1 + 0x0] %asi, %o0
175 sethi %hi(sys_bind), %g1 179 sethi %hi(sys_bind), %g1
176 ldswa [%o1 + 0x8] %asi, %o2 1805: ldswa [%o1 + 0x8] %asi, %o2
177 jmpl %g1 + %lo(sys_bind), %g0 181 jmpl %g1 + %lo(sys_bind), %g0
178 lduwa [%o1 + 0x4] %asi, %o1 1826: lduwa [%o1 + 0x4] %asi, %o1
179 nop 183 nop
180 nop 184 nop
181 nop 185 nop
182do_sys_connect: /* sys_connect(int, struct sockaddr *, int) */ 186do_sys_connect: /* sys_connect(int, struct sockaddr *, int) */
183 ldswa [%o1 + 0x0] %asi, %o0 1877: ldswa [%o1 + 0x0] %asi, %o0
184 sethi %hi(sys_connect), %g1 188 sethi %hi(sys_connect), %g1
185 ldswa [%o1 + 0x8] %asi, %o2 1898: ldswa [%o1 + 0x8] %asi, %o2
186 jmpl %g1 + %lo(sys_connect), %g0 190 jmpl %g1 + %lo(sys_connect), %g0
187 lduwa [%o1 + 0x4] %asi, %o1 1919: lduwa [%o1 + 0x4] %asi, %o1
188 nop 192 nop
189 nop 193 nop
190 nop 194 nop
191do_sys_listen: /* sys_listen(int, int) */ 195do_sys_listen: /* sys_listen(int, int) */
192 ldswa [%o1 + 0x0] %asi, %o0 19610: ldswa [%o1 + 0x0] %asi, %o0
193 sethi %hi(sys_listen), %g1 197 sethi %hi(sys_listen), %g1
194 jmpl %g1 + %lo(sys_listen), %g0 198 jmpl %g1 + %lo(sys_listen), %g0
195 ldswa [%o1 + 0x4] %asi, %o1 19911: ldswa [%o1 + 0x4] %asi, %o1
196 nop 200 nop
197 nop 201 nop
198 nop 202 nop
199 nop 203 nop
200do_sys_accept: /* sys_accept(int, struct sockaddr *, int *) */ 204do_sys_accept: /* sys_accept(int, struct sockaddr *, int *) */
201 ldswa [%o1 + 0x0] %asi, %o0 20512: ldswa [%o1 + 0x0] %asi, %o0
202 sethi %hi(sys_accept), %g1 206 sethi %hi(sys_accept), %g1
203 lduwa [%o1 + 0x8] %asi, %o2 20713: lduwa [%o1 + 0x8] %asi, %o2
204 jmpl %g1 + %lo(sys_accept), %g0 208 jmpl %g1 + %lo(sys_accept), %g0
205 lduwa [%o1 + 0x4] %asi, %o1 20914: lduwa [%o1 + 0x4] %asi, %o1
206 nop 210 nop
207 nop 211 nop
208 nop 212 nop
209do_sys_getsockname: /* sys_getsockname(int, struct sockaddr *, int *) */ 213do_sys_getsockname: /* sys_getsockname(int, struct sockaddr *, int *) */
210 ldswa [%o1 + 0x0] %asi, %o0 21415: ldswa [%o1 + 0x0] %asi, %o0
211 sethi %hi(sys_getsockname), %g1 215 sethi %hi(sys_getsockname), %g1
212 lduwa [%o1 + 0x8] %asi, %o2 21616: lduwa [%o1 + 0x8] %asi, %o2
213 jmpl %g1 + %lo(sys_getsockname), %g0 217 jmpl %g1 + %lo(sys_getsockname), %g0
214 lduwa [%o1 + 0x4] %asi, %o1 21817: lduwa [%o1 + 0x4] %asi, %o1
215 nop 219 nop
216 nop 220 nop
217 nop 221 nop
218do_sys_getpeername: /* sys_getpeername(int, struct sockaddr *, int *) */ 222do_sys_getpeername: /* sys_getpeername(int, struct sockaddr *, int *) */
219 ldswa [%o1 + 0x0] %asi, %o0 22318: ldswa [%o1 + 0x0] %asi, %o0
220 sethi %hi(sys_getpeername), %g1 224 sethi %hi(sys_getpeername), %g1
221 lduwa [%o1 + 0x8] %asi, %o2 22519: lduwa [%o1 + 0x8] %asi, %o2
222 jmpl %g1 + %lo(sys_getpeername), %g0 226 jmpl %g1 + %lo(sys_getpeername), %g0
223 lduwa [%o1 + 0x4] %asi, %o1 22720: lduwa [%o1 + 0x4] %asi, %o1
224 nop 228 nop
225 nop 229 nop
226 nop 230 nop
227do_sys_socketpair: /* sys_socketpair(int, int, int, int *) */ 231do_sys_socketpair: /* sys_socketpair(int, int, int, int *) */
228 ldswa [%o1 + 0x0] %asi, %o0 23221: ldswa [%o1 + 0x0] %asi, %o0
229 sethi %hi(sys_socketpair), %g1 233 sethi %hi(sys_socketpair), %g1
230 ldswa [%o1 + 0x8] %asi, %o2 23422: ldswa [%o1 + 0x8] %asi, %o2
231 lduwa [%o1 + 0xc] %asi, %o3 23523: lduwa [%o1 + 0xc] %asi, %o3
232 jmpl %g1 + %lo(sys_socketpair), %g0 236 jmpl %g1 + %lo(sys_socketpair), %g0
233 ldswa [%o1 + 0x4] %asi, %o1 23724: ldswa [%o1 + 0x4] %asi, %o1
234 nop 238 nop
235 nop 239 nop
236do_sys_send: /* sys_send(int, void *, size_t, unsigned int) */ 240do_sys_send: /* sys_send(int, void *, size_t, unsigned int) */
237 ldswa [%o1 + 0x0] %asi, %o0 24125: ldswa [%o1 + 0x0] %asi, %o0
238 sethi %hi(sys_send), %g1 242 sethi %hi(sys_send), %g1
239 lduwa [%o1 + 0x8] %asi, %o2 24326: lduwa [%o1 + 0x8] %asi, %o2
240 lduwa [%o1 + 0xc] %asi, %o3 24427: lduwa [%o1 + 0xc] %asi, %o3
241 jmpl %g1 + %lo(sys_send), %g0 245 jmpl %g1 + %lo(sys_send), %g0
242 lduwa [%o1 + 0x4] %asi, %o1 24628: lduwa [%o1 + 0x4] %asi, %o1
243 nop 247 nop
244 nop 248 nop
245do_sys_recv: /* sys_recv(int, void *, size_t, unsigned int) */ 249do_sys_recv: /* sys_recv(int, void *, size_t, unsigned int) */
246 ldswa [%o1 + 0x0] %asi, %o0 25029: ldswa [%o1 + 0x0] %asi, %o0
247 sethi %hi(sys_recv), %g1 251 sethi %hi(sys_recv), %g1
248 lduwa [%o1 + 0x8] %asi, %o2 25230: lduwa [%o1 + 0x8] %asi, %o2
249 lduwa [%o1 + 0xc] %asi, %o3 25331: lduwa [%o1 + 0xc] %asi, %o3
250 jmpl %g1 + %lo(sys_recv), %g0 254 jmpl %g1 + %lo(sys_recv), %g0
251 lduwa [%o1 + 0x4] %asi, %o1 25532: lduwa [%o1 + 0x4] %asi, %o1
252 nop 256 nop
253 nop 257 nop
254do_sys_sendto: /* sys_sendto(int, u32, compat_size_t, unsigned int, u32, int) */ 258do_sys_sendto: /* sys_sendto(int, u32, compat_size_t, unsigned int, u32, int) */
255 ldswa [%o1 + 0x0] %asi, %o0 25933: ldswa [%o1 + 0x0] %asi, %o0
256 sethi %hi(sys_sendto), %g1 260 sethi %hi(sys_sendto), %g1
257 lduwa [%o1 + 0x8] %asi, %o2 26134: lduwa [%o1 + 0x8] %asi, %o2
258 lduwa [%o1 + 0xc] %asi, %o3 26235: lduwa [%o1 + 0xc] %asi, %o3
259 lduwa [%o1 + 0x10] %asi, %o4 26336: lduwa [%o1 + 0x10] %asi, %o4
260 ldswa [%o1 + 0x14] %asi, %o5 26437: ldswa [%o1 + 0x14] %asi, %o5
261 jmpl %g1 + %lo(sys_sendto), %g0 265 jmpl %g1 + %lo(sys_sendto), %g0
262 lduwa [%o1 + 0x4] %asi, %o1 26638: lduwa [%o1 + 0x4] %asi, %o1
263do_sys_recvfrom: /* sys_recvfrom(int, u32, compat_size_t, unsigned int, u32, u32) */ 267do_sys_recvfrom: /* sys_recvfrom(int, u32, compat_size_t, unsigned int, u32, u32) */
264 ldswa [%o1 + 0x0] %asi, %o0 26839: ldswa [%o1 + 0x0] %asi, %o0
265 sethi %hi(sys_recvfrom), %g1 269 sethi %hi(sys_recvfrom), %g1
266 lduwa [%o1 + 0x8] %asi, %o2 27040: lduwa [%o1 + 0x8] %asi, %o2
267 lduwa [%o1 + 0xc] %asi, %o3 27141: lduwa [%o1 + 0xc] %asi, %o3
268 lduwa [%o1 + 0x10] %asi, %o4 27242: lduwa [%o1 + 0x10] %asi, %o4
269 lduwa [%o1 + 0x14] %asi, %o5 27343: lduwa [%o1 + 0x14] %asi, %o5
270 jmpl %g1 + %lo(sys_recvfrom), %g0 274 jmpl %g1 + %lo(sys_recvfrom), %g0
271 lduwa [%o1 + 0x4] %asi, %o1 27544: lduwa [%o1 + 0x4] %asi, %o1
272do_sys_shutdown: /* sys_shutdown(int, int) */ 276do_sys_shutdown: /* sys_shutdown(int, int) */
273 ldswa [%o1 + 0x0] %asi, %o0 27745: ldswa [%o1 + 0x0] %asi, %o0
274 sethi %hi(sys_shutdown), %g1 278 sethi %hi(sys_shutdown), %g1
275 jmpl %g1 + %lo(sys_shutdown), %g0 279 jmpl %g1 + %lo(sys_shutdown), %g0
276 ldswa [%o1 + 0x4] %asi, %o1 28046: ldswa [%o1 + 0x4] %asi, %o1
277 nop 281 nop
278 nop 282 nop
279 nop 283 nop
280 nop 284 nop
281do_sys_setsockopt: /* compat_sys_setsockopt(int, int, int, char *, int) */ 285do_sys_setsockopt: /* compat_sys_setsockopt(int, int, int, char *, int) */
282 ldswa [%o1 + 0x0] %asi, %o0 28647: ldswa [%o1 + 0x0] %asi, %o0
283 sethi %hi(compat_sys_setsockopt), %g1 287 sethi %hi(compat_sys_setsockopt), %g1
284 ldswa [%o1 + 0x8] %asi, %o2 28848: ldswa [%o1 + 0x8] %asi, %o2
285 lduwa [%o1 + 0xc] %asi, %o3 28949: lduwa [%o1 + 0xc] %asi, %o3
286 ldswa [%o1 + 0x10] %asi, %o4 29050: ldswa [%o1 + 0x10] %asi, %o4
287 jmpl %g1 + %lo(compat_sys_setsockopt), %g0 291 jmpl %g1 + %lo(compat_sys_setsockopt), %g0
288 ldswa [%o1 + 0x4] %asi, %o1 29251: ldswa [%o1 + 0x4] %asi, %o1
289 nop 293 nop
290do_sys_getsockopt: /* compat_sys_getsockopt(int, int, int, u32, u32) */ 294do_sys_getsockopt: /* compat_sys_getsockopt(int, int, int, u32, u32) */
291 ldswa [%o1 + 0x0] %asi, %o0 29552: ldswa [%o1 + 0x0] %asi, %o0
292 sethi %hi(compat_sys_getsockopt), %g1 296 sethi %hi(compat_sys_getsockopt), %g1
293 ldswa [%o1 + 0x8] %asi, %o2 29753: ldswa [%o1 + 0x8] %asi, %o2
294 lduwa [%o1 + 0xc] %asi, %o3 29854: lduwa [%o1 + 0xc] %asi, %o3
295 lduwa [%o1 + 0x10] %asi, %o4 29955: lduwa [%o1 + 0x10] %asi, %o4
296 jmpl %g1 + %lo(compat_sys_getsockopt), %g0 300 jmpl %g1 + %lo(compat_sys_getsockopt), %g0
297 ldswa [%o1 + 0x4] %asi, %o1 30156: ldswa [%o1 + 0x4] %asi, %o1
298 nop 302 nop
299do_sys_sendmsg: /* compat_sys_sendmsg(int, struct compat_msghdr *, unsigned int) */ 303do_sys_sendmsg: /* compat_sys_sendmsg(int, struct compat_msghdr *, unsigned int) */
300 ldswa [%o1 + 0x0] %asi, %o0 30457: ldswa [%o1 + 0x0] %asi, %o0
301 sethi %hi(compat_sys_sendmsg), %g1 305 sethi %hi(compat_sys_sendmsg), %g1
302 lduwa [%o1 + 0x8] %asi, %o2 30658: lduwa [%o1 + 0x8] %asi, %o2
303 jmpl %g1 + %lo(compat_sys_sendmsg), %g0 307 jmpl %g1 + %lo(compat_sys_sendmsg), %g0
304 lduwa [%o1 + 0x4] %asi, %o1 30859: lduwa [%o1 + 0x4] %asi, %o1
305 nop 309 nop
306 nop 310 nop
307 nop 311 nop
308do_sys_recvmsg: /* compat_sys_recvmsg(int, struct compat_msghdr *, unsigned int) */ 312do_sys_recvmsg: /* compat_sys_recvmsg(int, struct compat_msghdr *, unsigned int) */
309 ldswa [%o1 + 0x0] %asi, %o0 31360: ldswa [%o1 + 0x0] %asi, %o0
310 sethi %hi(compat_sys_recvmsg), %g1 314 sethi %hi(compat_sys_recvmsg), %g1
311 lduwa [%o1 + 0x8] %asi, %o2 31561: lduwa [%o1 + 0x8] %asi, %o2
312 jmpl %g1 + %lo(compat_sys_recvmsg), %g0 316 jmpl %g1 + %lo(compat_sys_recvmsg), %g0
313 lduwa [%o1 + 0x4] %asi, %o1 31762: lduwa [%o1 + 0x4] %asi, %o1
314 nop 318 nop
315 nop 319 nop
316 nop 320 nop
317__socketcall_table_end:
318
319do_einval:
320 retl
321 mov -EINVAL, %o0
322do_efault:
323 retl
324 mov -EFAULT, %o0
325 321
326 .section __ex_table 322 .section __ex_table
327 .align 4 323 .align 4
328 .word __socketcall_table_begin, 0, __socketcall_table_end, do_efault 324 .word 1b, __retl_efault, 2b, __retl_efault
325 .word 3b, __retl_efault, 4b, __retl_efault
326 .word 5b, __retl_efault, 6b, __retl_efault
327 .word 7b, __retl_efault, 8b, __retl_efault
328 .word 9b, __retl_efault, 10b, __retl_efault
329 .word 11b, __retl_efault, 12b, __retl_efault
330 .word 13b, __retl_efault, 14b, __retl_efault
331 .word 15b, __retl_efault, 16b, __retl_efault
332 .word 17b, __retl_efault, 18b, __retl_efault
333 .word 19b, __retl_efault, 20b, __retl_efault
334 .word 21b, __retl_efault, 22b, __retl_efault
335 .word 23b, __retl_efault, 24b, __retl_efault
336 .word 25b, __retl_efault, 26b, __retl_efault
337 .word 27b, __retl_efault, 28b, __retl_efault
338 .word 29b, __retl_efault, 30b, __retl_efault
339 .word 31b, __retl_efault, 32b, __retl_efault
340 .word 33b, __retl_efault, 34b, __retl_efault
341 .word 35b, __retl_efault, 36b, __retl_efault
342 .word 37b, __retl_efault, 38b, __retl_efault
343 .word 39b, __retl_efault, 40b, __retl_efault
344 .word 41b, __retl_efault, 42b, __retl_efault
345 .word 43b, __retl_efault, 44b, __retl_efault
346 .word 45b, __retl_efault, 46b, __retl_efault
347 .word 47b, __retl_efault, 48b, __retl_efault
348 .word 49b, __retl_efault, 50b, __retl_efault
349 .word 51b, __retl_efault, 52b, __retl_efault
350 .word 53b, __retl_efault, 54b, __retl_efault
351 .word 55b, __retl_efault, 56b, __retl_efault
352 .word 57b, __retl_efault, 58b, __retl_efault
353 .word 59b, __retl_efault, 60b, __retl_efault
354 .word 61b, __retl_efault, 62b, __retl_efault
329 .previous 355 .previous
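Each numbered local label (1:, 2:, ... 62:) above marks a single user-space load, and the rebuilt __ex_table pairs that one instruction with __retl_efault, a shared stub that returns -EFAULT; the old layout instead covered the whole stub table with a single range entry pointing at do_efault. A C-level sketch of the per-instruction entry and its lookup, assuming a simplified two-field layout (this illustrates the idea, it is not the kernel's extable code):

/*
 * Sketch only: one exception-table entry per faulting instruction.  When
 * the load at 'insn' faults in kernel mode, the trap handler points the
 * PC at 'fixup'; for the socketcall stubs above that fixup is
 * __retl_efault, which simply returns -EFAULT to the 32-bit caller.
 * Field widths are simplified.
 */
struct ex_entry_sketch {
	unsigned long insn;	/* address of the faulting instruction */
	unsigned long fixup;	/* address of the recovery code */
};

static const struct ex_entry_sketch *
find_fixup(const struct ex_entry_sketch *tbl, unsigned int n, unsigned long pc)
{
	unsigned int i;

	for (i = 0; i < n; i++)
		if (tbl[i].insn == pc)
			return &tbl[i];
	return 0;		/* no entry: the fault is fatal for the kernel */
}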
diff --git a/arch/sparc64/kernel/trampoline.S b/arch/sparc64/kernel/trampoline.S
index 3a145fc39cf2..89f2fcfcd662 100644
--- a/arch/sparc64/kernel/trampoline.S
+++ b/arch/sparc64/kernel/trampoline.S
@@ -119,8 +119,8 @@ startup_continue:
119 sethi %hi(itlb_load), %g2 119 sethi %hi(itlb_load), %g2
120 or %g2, %lo(itlb_load), %g2 120 or %g2, %lo(itlb_load), %g2
121 stx %g2, [%sp + 2047 + 128 + 0x18] 121 stx %g2, [%sp + 2047 + 128 + 0x18]
122 sethi %hi(mmu_ihandle_cache), %g2 122 sethi %hi(prom_mmu_ihandle_cache), %g2
123 lduw [%g2 + %lo(mmu_ihandle_cache)], %g2 123 lduw [%g2 + %lo(prom_mmu_ihandle_cache)], %g2
124 stx %g2, [%sp + 2047 + 128 + 0x20] 124 stx %g2, [%sp + 2047 + 128 + 0x20]
125 sethi %hi(KERNBASE), %g2 125 sethi %hi(KERNBASE), %g2
126 stx %g2, [%sp + 2047 + 128 + 0x28] 126 stx %g2, [%sp + 2047 + 128 + 0x28]
@@ -156,8 +156,8 @@ startup_continue:
156 sethi %hi(itlb_load), %g2 156 sethi %hi(itlb_load), %g2
157 or %g2, %lo(itlb_load), %g2 157 or %g2, %lo(itlb_load), %g2
158 stx %g2, [%sp + 2047 + 128 + 0x18] 158 stx %g2, [%sp + 2047 + 128 + 0x18]
159 sethi %hi(mmu_ihandle_cache), %g2 159 sethi %hi(prom_mmu_ihandle_cache), %g2
160 lduw [%g2 + %lo(mmu_ihandle_cache)], %g2 160 lduw [%g2 + %lo(prom_mmu_ihandle_cache)], %g2
161 stx %g2, [%sp + 2047 + 128 + 0x20] 161 stx %g2, [%sp + 2047 + 128 + 0x20]
162 sethi %hi(KERNBASE + 0x400000), %g2 162 sethi %hi(KERNBASE + 0x400000), %g2
163 stx %g2, [%sp + 2047 + 128 + 0x28] 163 stx %g2, [%sp + 2047 + 128 + 0x28]
@@ -190,8 +190,8 @@ do_dtlb:
190 sethi %hi(dtlb_load), %g2 190 sethi %hi(dtlb_load), %g2
191 or %g2, %lo(dtlb_load), %g2 191 or %g2, %lo(dtlb_load), %g2
192 stx %g2, [%sp + 2047 + 128 + 0x18] 192 stx %g2, [%sp + 2047 + 128 + 0x18]
193 sethi %hi(mmu_ihandle_cache), %g2 193 sethi %hi(prom_mmu_ihandle_cache), %g2
194 lduw [%g2 + %lo(mmu_ihandle_cache)], %g2 194 lduw [%g2 + %lo(prom_mmu_ihandle_cache)], %g2
195 stx %g2, [%sp + 2047 + 128 + 0x20] 195 stx %g2, [%sp + 2047 + 128 + 0x20]
196 sethi %hi(KERNBASE), %g2 196 sethi %hi(KERNBASE), %g2
197 stx %g2, [%sp + 2047 + 128 + 0x28] 197 stx %g2, [%sp + 2047 + 128 + 0x28]
@@ -228,8 +228,8 @@ do_dtlb:
228 sethi %hi(dtlb_load), %g2 228 sethi %hi(dtlb_load), %g2
229 or %g2, %lo(dtlb_load), %g2 229 or %g2, %lo(dtlb_load), %g2
230 stx %g2, [%sp + 2047 + 128 + 0x18] 230 stx %g2, [%sp + 2047 + 128 + 0x18]
231 sethi %hi(mmu_ihandle_cache), %g2 231 sethi %hi(prom_mmu_ihandle_cache), %g2
232 lduw [%g2 + %lo(mmu_ihandle_cache)], %g2 232 lduw [%g2 + %lo(prom_mmu_ihandle_cache)], %g2
233 stx %g2, [%sp + 2047 + 128 + 0x20] 233 stx %g2, [%sp + 2047 + 128 + 0x20]
234 sethi %hi(KERNBASE + 0x400000), %g2 234 sethi %hi(KERNBASE + 0x400000), %g2
235 stx %g2, [%sp + 2047 + 128 + 0x28] 235 stx %g2, [%sp + 2047 + 128 + 0x28]
diff --git a/arch/sparc64/kernel/traps.c b/arch/sparc64/kernel/traps.c
index b280b2ef674f..5570e7bb22bb 100644
--- a/arch/sparc64/kernel/traps.c
+++ b/arch/sparc64/kernel/traps.c
@@ -189,19 +189,18 @@ void spitfire_data_access_exception(struct pt_regs *regs, unsigned long sfsr, un
189 189
190 if (regs->tstate & TSTATE_PRIV) { 190 if (regs->tstate & TSTATE_PRIV) {
191 /* Test if this comes from uaccess places. */ 191 /* Test if this comes from uaccess places. */
192 unsigned long fixup; 192 const struct exception_table_entry *entry;
193 unsigned long g2 = regs->u_regs[UREG_G2];
194 193
195 if ((fixup = search_extables_range(regs->tpc, &g2))) { 194 entry = search_exception_tables(regs->tpc);
196 /* Ouch, somebody is trying ugly VM hole tricks on us... */ 195 if (entry) {
196 /* Ouch, somebody is trying VM hole tricks on us... */
197#ifdef DEBUG_EXCEPTIONS 197#ifdef DEBUG_EXCEPTIONS
198 printk("Exception: PC<%016lx> faddr<UNKNOWN>\n", regs->tpc); 198 printk("Exception: PC<%016lx> faddr<UNKNOWN>\n", regs->tpc);
199 printk("EX_TABLE: insn<%016lx> fixup<%016lx> " 199 printk("EX_TABLE: insn<%016lx> fixup<%016lx>\n",
200 "g2<%016lx>\n", regs->tpc, fixup, g2); 200 regs->tpc, entry->fixup);
201#endif 201#endif
202 regs->tpc = fixup; 202 regs->tpc = entry->fixup;
203 regs->tnpc = regs->tpc + 4; 203 regs->tnpc = regs->tpc + 4;
204 regs->u_regs[UREG_G2] = g2;
205 return; 204 return;
206 } 205 }
207 /* Shit... */ 206 /* Shit... */
@@ -758,26 +757,12 @@ void __init cheetah_ecache_flush_init(void)
758 ecache_flush_size = (2 * largest_size); 757 ecache_flush_size = (2 * largest_size);
759 ecache_flush_linesize = smallest_linesize; 758 ecache_flush_linesize = smallest_linesize;
760 759
761 /* Discover a physically contiguous chunk of physical 760 ecache_flush_physbase = find_ecache_flush_span(ecache_flush_size);
762 * memory in 'sp_banks' of size ecache_flush_size calculated
763 * above. Store the physical base of this area at
764 * ecache_flush_physbase.
765 */
766 for (node = 0; ; node++) {
767 if (sp_banks[node].num_bytes == 0)
768 break;
769 if (sp_banks[node].num_bytes >= ecache_flush_size) {
770 ecache_flush_physbase = sp_banks[node].base_addr;
771 break;
772 }
773 }
774 761
775 /* Note: Zero would be a valid value of ecache_flush_physbase so 762 if (ecache_flush_physbase == ~0UL) {
776 * don't use that as the success test. :-)
777 */
778 if (sp_banks[node].num_bytes == 0) {
779 prom_printf("cheetah_ecache_flush_init: Cannot find %d byte " 763 prom_printf("cheetah_ecache_flush_init: Cannot find %d byte "
780 "contiguous physical memory.\n", ecache_flush_size); 764 "contiguous physical memory.\n",
765 ecache_flush_size);
781 prom_halt(); 766 prom_halt();
782 } 767 }
783 768
@@ -869,14 +854,19 @@ static void cheetah_flush_ecache_line(unsigned long physaddr)
869 */ 854 */
870static void __cheetah_flush_icache(void) 855static void __cheetah_flush_icache(void)
871{ 856{
872 unsigned long i; 857 unsigned int icache_size, icache_line_size;
858 unsigned long addr;
859
860 icache_size = local_cpu_data().icache_size;
861 icache_line_size = local_cpu_data().icache_line_size;
873 862
874 /* Clear the valid bits in all the tags. */ 863 /* Clear the valid bits in all the tags. */
875 for (i = 0; i < (1 << 15); i += (1 << 5)) { 864 for (addr = 0; addr < icache_size; addr += icache_line_size) {
876 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" 865 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
877 "membar #Sync" 866 "membar #Sync"
878 : /* no outputs */ 867 : /* no outputs */
879 : "r" (i | (2 << 3)), "i" (ASI_IC_TAG)); 868 : "r" (addr | (2 << 3)),
869 "i" (ASI_IC_TAG));
880 } 870 }
881} 871}
882 872
@@ -904,13 +894,17 @@ static void cheetah_flush_icache(void)
904 894
905static void cheetah_flush_dcache(void) 895static void cheetah_flush_dcache(void)
906{ 896{
907 unsigned long i; 897 unsigned int dcache_size, dcache_line_size;
898 unsigned long addr;
908 899
909 for (i = 0; i < (1 << 16); i += (1 << 5)) { 900 dcache_size = local_cpu_data().dcache_size;
901 dcache_line_size = local_cpu_data().dcache_line_size;
902
903 for (addr = 0; addr < dcache_size; addr += dcache_line_size) {
910 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" 904 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
911 "membar #Sync" 905 "membar #Sync"
912 : /* no outputs */ 906 : /* no outputs */
913 : "r" (i), "i" (ASI_DCACHE_TAG)); 907 : "r" (addr), "i" (ASI_DCACHE_TAG));
914 } 908 }
915} 909}
916 910
@@ -921,24 +915,29 @@ static void cheetah_flush_dcache(void)
921 */ 915 */
922static void cheetah_plus_zap_dcache_parity(void) 916static void cheetah_plus_zap_dcache_parity(void)
923{ 917{
924 unsigned long i; 918 unsigned int dcache_size, dcache_line_size;
919 unsigned long addr;
920
921 dcache_size = local_cpu_data().dcache_size;
922 dcache_line_size = local_cpu_data().dcache_line_size;
925 923
926 for (i = 0; i < (1 << 16); i += (1 << 5)) { 924 for (addr = 0; addr < dcache_size; addr += dcache_line_size) {
927 unsigned long tag = (i >> 14); 925 unsigned long tag = (addr >> 14);
928 unsigned long j; 926 unsigned long line;
929 927
930 __asm__ __volatile__("membar #Sync\n\t" 928 __asm__ __volatile__("membar #Sync\n\t"
931 "stxa %0, [%1] %2\n\t" 929 "stxa %0, [%1] %2\n\t"
932 "membar #Sync" 930 "membar #Sync"
933 : /* no outputs */ 931 : /* no outputs */
934 : "r" (tag), "r" (i), 932 : "r" (tag), "r" (addr),
935 "i" (ASI_DCACHE_UTAG)); 933 "i" (ASI_DCACHE_UTAG));
936 for (j = i; j < i + (1 << 5); j += (1 << 3)) 934 for (line = addr; line < addr + dcache_line_size; line += 8)
937 __asm__ __volatile__("membar #Sync\n\t" 935 __asm__ __volatile__("membar #Sync\n\t"
938 "stxa %%g0, [%0] %1\n\t" 936 "stxa %%g0, [%0] %1\n\t"
939 "membar #Sync" 937 "membar #Sync"
940 : /* no outputs */ 938 : /* no outputs */
941 : "r" (j), "i" (ASI_DCACHE_DATA)); 939 : "r" (line),
940 "i" (ASI_DCACHE_DATA));
942 } 941 }
943} 942}
944 943
@@ -1332,16 +1331,12 @@ static int cheetah_fix_ce(unsigned long physaddr)
1332/* Return non-zero if PADDR is a valid physical memory address. */ 1331/* Return non-zero if PADDR is a valid physical memory address. */
1333static int cheetah_check_main_memory(unsigned long paddr) 1332static int cheetah_check_main_memory(unsigned long paddr)
1334{ 1333{
1335 int i; 1334 unsigned long vaddr = PAGE_OFFSET + paddr;
1336 1335
1337 for (i = 0; ; i++) { 1336 if (vaddr > (unsigned long) high_memory)
1338 if (sp_banks[i].num_bytes == 0) 1337 return 0;
1339 break; 1338
1340 if (paddr >= sp_banks[i].base_addr && 1339 return kern_addr_valid(vaddr);
1341 paddr < (sp_banks[i].base_addr + sp_banks[i].num_bytes))
1342 return 1;
1343 }
1344 return 0;
1345} 1340}
1346 1341
1347void cheetah_cee_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar) 1342void cheetah_cee_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
@@ -1596,10 +1591,10 @@ void cheetah_deferred_handler(struct pt_regs *regs, unsigned long afsr, unsigned
1596 /* OK, usermode access. */ 1591 /* OK, usermode access. */
1597 recoverable = 1; 1592 recoverable = 1;
1598 } else { 1593 } else {
1599 unsigned long g2 = regs->u_regs[UREG_G2]; 1594 const struct exception_table_entry *entry;
1600 unsigned long fixup = search_extables_range(regs->tpc, &g2);
1601 1595
1602 if (fixup != 0UL) { 1596 entry = search_exception_tables(regs->tpc);
1597 if (entry) {
1603 /* OK, kernel access to userspace. */ 1598 /* OK, kernel access to userspace. */
1604 recoverable = 1; 1599 recoverable = 1;
1605 1600
@@ -1618,9 +1613,8 @@ void cheetah_deferred_handler(struct pt_regs *regs, unsigned long afsr, unsigned
1618 * recoverable condition. 1613 * recoverable condition.
1619 */ 1614 */
1620 if (recoverable) { 1615 if (recoverable) {
1621 regs->tpc = fixup; 1616 regs->tpc = entry->fixup;
1622 regs->tnpc = regs->tpc + 4; 1617 regs->tnpc = regs->tpc + 4;
1623 regs->u_regs[UREG_G2] = g2;
1624 } 1618 }
1625 } 1619 }
1626 } 1620 }
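With sp_banks[] gone, cheetah_check_main_memory() above now validates a physical address by translating it into the linear kernel mapping and asking the page tables, rather than scanning a bank table. A small sketch of that check, where LINEAR_BASE_SKETCH and the is_mapped callback are hypothetical stand-ins for PAGE_OFFSET and kern_addr_valid():

/*
 * Sketch of the simplified RAM check: a physical address is valid main
 * memory if its linear-mapped virtual address lies below high_memory and
 * the kernel page tables actually map it.
 */
#define LINEAR_BASE_SKETCH 0xfffff80000000000UL	/* assumed, not quoted */

static int paddr_is_ram(unsigned long paddr, unsigned long high_memory_va,
			int (*is_mapped)(unsigned long va))
{
	unsigned long vaddr = LINEAR_BASE_SKETCH + paddr;

	if (vaddr > high_memory_va)
		return 0;
	return is_mapped(vaddr);	/* kern_addr_valid() in the patch */
}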
diff --git a/arch/sparc64/kernel/una_asm.S b/arch/sparc64/kernel/una_asm.S
index da48400bcc95..1f5b5b708ce7 100644
--- a/arch/sparc64/kernel/una_asm.S
+++ b/arch/sparc64/kernel/una_asm.S
@@ -6,13 +6,6 @@
6 6
7 .text 7 .text
8 8
9kernel_unaligned_trap_fault:
10 call kernel_mna_trap_fault
11 nop
12 retl
13 nop
14 .size kern_unaligned_trap_fault, .-kern_unaligned_trap_fault
15
16 .globl __do_int_store 9 .globl __do_int_store
17__do_int_store: 10__do_int_store:
18 rd %asi, %o4 11 rd %asi, %o4
@@ -51,24 +44,24 @@ __do_int_store:
510: 440:
52 wr %o4, 0x0, %asi 45 wr %o4, 0x0, %asi
53 retl 46 retl
54 nop 47 mov 0, %o0
55 .size __do_int_store, .-__do_int_store 48 .size __do_int_store, .-__do_int_store
56 49
57 .section __ex_table 50 .section __ex_table
58 .word 4b, kernel_unaligned_trap_fault 51 .word 4b, __retl_efault
59 .word 5b, kernel_unaligned_trap_fault 52 .word 5b, __retl_efault
60 .word 6b, kernel_unaligned_trap_fault 53 .word 6b, __retl_efault
61 .word 7b, kernel_unaligned_trap_fault 54 .word 7b, __retl_efault
62 .word 8b, kernel_unaligned_trap_fault 55 .word 8b, __retl_efault
63 .word 9b, kernel_unaligned_trap_fault 56 .word 9b, __retl_efault
64 .word 10b, kernel_unaligned_trap_fault 57 .word 10b, __retl_efault
65 .word 11b, kernel_unaligned_trap_fault 58 .word 11b, __retl_efault
66 .word 12b, kernel_unaligned_trap_fault 59 .word 12b, __retl_efault
67 .word 13b, kernel_unaligned_trap_fault 60 .word 13b, __retl_efault
68 .word 14b, kernel_unaligned_trap_fault 61 .word 14b, __retl_efault
69 .word 15b, kernel_unaligned_trap_fault 62 .word 15b, __retl_efault
70 .word 16b, kernel_unaligned_trap_fault 63 .word 16b, __retl_efault
71 .word 17b, kernel_unaligned_trap_fault 64 .word 17b, __retl_efault
72 .previous 65 .previous
73 66
74 .globl do_int_load 67 .globl do_int_load
@@ -133,21 +126,21 @@ do_int_load:
1330: 1260:
134 wr %o5, 0x0, %asi 127 wr %o5, 0x0, %asi
135 retl 128 retl
136 nop 129 mov 0, %o0
137 .size __do_int_load, .-__do_int_load 130 .size __do_int_load, .-__do_int_load
138 131
139 .section __ex_table 132 .section __ex_table
140 .word 4b, kernel_unaligned_trap_fault 133 .word 4b, __retl_efault
141 .word 5b, kernel_unaligned_trap_fault 134 .word 5b, __retl_efault
142 .word 6b, kernel_unaligned_trap_fault 135 .word 6b, __retl_efault
143 .word 7b, kernel_unaligned_trap_fault 136 .word 7b, __retl_efault
144 .word 8b, kernel_unaligned_trap_fault 137 .word 8b, __retl_efault
145 .word 9b, kernel_unaligned_trap_fault 138 .word 9b, __retl_efault
146 .word 10b, kernel_unaligned_trap_fault 139 .word 10b, __retl_efault
147 .word 11b, kernel_unaligned_trap_fault 140 .word 11b, __retl_efault
148 .word 12b, kernel_unaligned_trap_fault 141 .word 12b, __retl_efault
149 .word 13b, kernel_unaligned_trap_fault 142 .word 13b, __retl_efault
150 .word 14b, kernel_unaligned_trap_fault 143 .word 14b, __retl_efault
151 .word 15b, kernel_unaligned_trap_fault 144 .word 15b, __retl_efault
152 .word 16b, kernel_unaligned_trap_fault 145 .word 16b, __retl_efault
153 .previous 146 .previous
diff --git a/arch/sparc64/kernel/unaligned.c b/arch/sparc64/kernel/unaligned.c
index 42718f6a7d36..70faf630603b 100644
--- a/arch/sparc64/kernel/unaligned.c
+++ b/arch/sparc64/kernel/unaligned.c
@@ -180,14 +180,14 @@ static void __attribute_used__ unaligned_panic(char *str, struct pt_regs *regs)
180 die_if_kernel(str, regs); 180 die_if_kernel(str, regs);
181} 181}
182 182
183extern void do_int_load(unsigned long *dest_reg, int size, 183extern int do_int_load(unsigned long *dest_reg, int size,
184 unsigned long *saddr, int is_signed, int asi); 184 unsigned long *saddr, int is_signed, int asi);
185 185
186extern void __do_int_store(unsigned long *dst_addr, int size, 186extern int __do_int_store(unsigned long *dst_addr, int size,
187 unsigned long src_val, int asi); 187 unsigned long src_val, int asi);
188 188
189static inline void do_int_store(int reg_num, int size, unsigned long *dst_addr, 189static inline int do_int_store(int reg_num, int size, unsigned long *dst_addr,
190 struct pt_regs *regs, int asi, int orig_asi) 190 struct pt_regs *regs, int asi, int orig_asi)
191{ 191{
192 unsigned long zero = 0; 192 unsigned long zero = 0;
193 unsigned long *src_val_p = &zero; 193 unsigned long *src_val_p = &zero;
@@ -219,7 +219,7 @@ static inline void do_int_store(int reg_num, int size, unsigned long *dst_addr,
219 break; 219 break;
220 }; 220 };
221 } 221 }
222 __do_int_store(dst_addr, size, src_val, asi); 222 return __do_int_store(dst_addr, size, src_val, asi);
223} 223}
224 224
225static inline void advance(struct pt_regs *regs) 225static inline void advance(struct pt_regs *regs)
@@ -242,14 +242,14 @@ static inline int ok_for_kernel(unsigned int insn)
242 return !floating_point_load_or_store_p(insn); 242 return !floating_point_load_or_store_p(insn);
243} 243}
244 244
245void kernel_mna_trap_fault(void) 245static void kernel_mna_trap_fault(void)
246{ 246{
247 struct pt_regs *regs = current_thread_info()->kern_una_regs; 247 struct pt_regs *regs = current_thread_info()->kern_una_regs;
248 unsigned int insn = current_thread_info()->kern_una_insn; 248 unsigned int insn = current_thread_info()->kern_una_insn;
249 unsigned long g2 = regs->u_regs[UREG_G2]; 249 const struct exception_table_entry *entry;
250 unsigned long fixup = search_extables_range(regs->tpc, &g2);
251 250
252 if (!fixup) { 251 entry = search_exception_tables(regs->tpc);
252 if (!entry) {
253 unsigned long address; 253 unsigned long address;
254 254
255 address = compute_effective_address(regs, insn, 255 address = compute_effective_address(regs, insn,
@@ -270,9 +270,8 @@ void kernel_mna_trap_fault(void)
270 die_if_kernel("Oops", regs); 270 die_if_kernel("Oops", regs);
271 /* Not reached */ 271 /* Not reached */
272 } 272 }
273 regs->tpc = fixup; 273 regs->tpc = entry->fixup;
274 regs->tnpc = regs->tpc + 4; 274 regs->tnpc = regs->tpc + 4;
275 regs->u_regs [UREG_G2] = g2;
276 275
277 regs->tstate &= ~TSTATE_ASI; 276 regs->tstate &= ~TSTATE_ASI;
278 regs->tstate |= (ASI_AIUS << 24UL); 277 regs->tstate |= (ASI_AIUS << 24UL);
@@ -294,8 +293,8 @@ asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn, u
294 293
295 kernel_mna_trap_fault(); 294 kernel_mna_trap_fault();
296 } else { 295 } else {
297 unsigned long addr; 296 unsigned long addr, *reg_addr;
298 int orig_asi, asi; 297 int orig_asi, asi, err;
299 298
300 addr = compute_effective_address(regs, insn, 299 addr = compute_effective_address(regs, insn,
301 ((insn >> 25) & 0x1f)); 300 ((insn >> 25) & 0x1f));
@@ -319,11 +318,12 @@ asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn, u
319 }; 318 };
320 switch (dir) { 319 switch (dir) {
321 case load: 320 case load:
322 do_int_load(fetch_reg_addr(((insn>>25)&0x1f), regs), 321 reg_addr = fetch_reg_addr(((insn>>25)&0x1f), regs);
323 size, (unsigned long *) addr, 322 err = do_int_load(reg_addr, size,
324 decode_signedness(insn), asi); 323 (unsigned long *) addr,
325 if (unlikely(asi != orig_asi)) { 324 decode_signedness(insn), asi);
326 unsigned long val_in = *(unsigned long *) addr; 325 if (likely(!err) && unlikely(asi != orig_asi)) {
326 unsigned long val_in = *reg_addr;
327 switch (size) { 327 switch (size) {
328 case 2: 328 case 2:
329 val_in = swab16(val_in); 329 val_in = swab16(val_in);
@@ -339,21 +339,24 @@ asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn, u
339 BUG(); 339 BUG();
340 break; 340 break;
341 }; 341 };
342 *(unsigned long *) addr = val_in; 342 *reg_addr = val_in;
343 } 343 }
344 break; 344 break;
345 345
346 case store: 346 case store:
347 do_int_store(((insn>>25)&0x1f), size, 347 err = do_int_store(((insn>>25)&0x1f), size,
348 (unsigned long *) addr, regs, 348 (unsigned long *) addr, regs,
349 asi, orig_asi); 349 asi, orig_asi);
350 break; 350 break;
351 351
352 default: 352 default:
353 panic("Impossible kernel unaligned trap."); 353 panic("Impossible kernel unaligned trap.");
354 /* Not reached... */ 354 /* Not reached... */
355 } 355 }
356 advance(regs); 356 if (unlikely(err))
357 kernel_mna_trap_fault();
358 else
359 advance(regs);
357 } 360 }
358} 361}
359 362
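Because __do_int_store and do_int_load now return 0 from their "mov 0, %o0" delay slots and -EFAULT via __retl_efault on a fault, the unaligned-trap handler can choose between fixup and advancing the PC in one place instead of relying on a separate fault label. A compressed sketch of that control flow, with hypothetical stand-ins for the real helpers:

/*
 * Sketch of the new flow.  emulate_load(), emulate_store(), report_fault()
 * and skip_insn() are hypothetical stand-ins for do_int_load(),
 * do_int_store(), kernel_mna_trap_fault() and advance().
 */
extern int  emulate_load(void);
extern int  emulate_store(void);
extern void report_fault(void);
extern void skip_insn(void);

void handle_unaligned_sketch(int is_load)
{
	int err = is_load ? emulate_load() : emulate_store();

	if (err)
		report_fault();	/* consult the exception table: fix up or die */
	else
		skip_insn();	/* step %tpc/%tnpc past the emulated insn */
}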
diff --git a/arch/sparc64/kernel/us3_cpufreq.c b/arch/sparc64/kernel/us3_cpufreq.c
index 9080e7cd4bb0..0340041f6143 100644
--- a/arch/sparc64/kernel/us3_cpufreq.c
+++ b/arch/sparc64/kernel/us3_cpufreq.c
@@ -208,7 +208,10 @@ static int __init us3_freq_init(void)
208 impl = ((ver >> 32) & 0xffff); 208 impl = ((ver >> 32) & 0xffff);
209 209
210 if (manuf == CHEETAH_MANUF && 210 if (manuf == CHEETAH_MANUF &&
211 (impl == CHEETAH_IMPL || impl == CHEETAH_PLUS_IMPL)) { 211 (impl == CHEETAH_IMPL ||
212 impl == CHEETAH_PLUS_IMPL ||
213 impl == JAGUAR_IMPL ||
214 impl == PANTHER_IMPL)) {
212 struct cpufreq_driver *driver; 215 struct cpufreq_driver *driver;
213 216
214 ret = -ENOMEM; 217 ret = -ENOMEM;
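The probe above registers the cpufreq driver based on the manufacturer and implementation fields decoded from the %ver register; the hunk simply widens the accepted implementation list to include Jaguar and Panther. A sketch of the decode, assuming the usual SPARC V9 VER layout (manufacturer in bits 63:48, implementation in bits 47:32); treat the bit positions as an assumption rather than a quotation of the driver:

/*
 * Sketch: decode %ver the way the probe does.  The implementation field
 * matches the hunk above; the manufacturer field position is my reading
 * of the SPARC V9 VER register layout.
 */
static void decode_ver(unsigned long ver,
		       unsigned int *manuf, unsigned int *impl)
{
	*manuf = (ver >> 48) & 0xffff;	/* manufacturer field (assumed) */
	*impl  = (ver >> 32) & 0xffff;	/* implementation field, as in the hunk */
}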
diff --git a/arch/sparc64/kernel/vmlinux.lds.S b/arch/sparc64/kernel/vmlinux.lds.S
index f47d0be39378..2af0cf0a8640 100644
--- a/arch/sparc64/kernel/vmlinux.lds.S
+++ b/arch/sparc64/kernel/vmlinux.lds.S
@@ -9,8 +9,7 @@ ENTRY(_start)
9jiffies = jiffies_64; 9jiffies = jiffies_64;
10SECTIONS 10SECTIONS
11{ 11{
12 swapper_pmd_dir = 0x0000000000402000; 12 swapper_low_pmd_dir = 0x0000000000402000;
13 empty_pg_dir = 0x0000000000403000;
14 . = 0x4000; 13 . = 0x4000;
15 .text 0x0000000000404000 : 14 .text 0x0000000000404000 :
16 { 15 {
diff --git a/arch/sparc64/lib/strncpy_from_user.S b/arch/sparc64/lib/strncpy_from_user.S
index 09cbbaa0ebf4..e1264650ca7a 100644
--- a/arch/sparc64/lib/strncpy_from_user.S
+++ b/arch/sparc64/lib/strncpy_from_user.S
@@ -125,15 +125,11 @@ __strncpy_from_user:
125 add %o2, %o3, %o0 125 add %o2, %o3, %o0
126 .size __strncpy_from_user, .-__strncpy_from_user 126 .size __strncpy_from_user, .-__strncpy_from_user
127 127
128 .section .fixup,#alloc,#execinstr
129 .align 4
1304: retl
131 mov -EFAULT, %o0
132
133 .section __ex_table,#alloc 128 .section __ex_table,#alloc
134 .align 4 129 .align 4
135 .word 60b, 4b 130 .word 60b, __retl_efault
136 .word 61b, 4b 131 .word 61b, __retl_efault
137 .word 62b, 4b 132 .word 62b, __retl_efault
138 .word 63b, 4b 133 .word 63b, __retl_efault
139 .word 64b, 4b 134 .word 64b, __retl_efault
135 .previous
diff --git a/arch/sparc64/lib/user_fixup.c b/arch/sparc64/lib/user_fixup.c
index 0278e34125db..19d1fdb17d0e 100644
--- a/arch/sparc64/lib/user_fixup.c
+++ b/arch/sparc64/lib/user_fixup.c
@@ -11,61 +11,56 @@
11 11
12/* Calculating the exact fault address when using 12/* Calculating the exact fault address when using
13 * block loads and stores can be very complicated. 13 * block loads and stores can be very complicated.
14 *
14 * Instead of trying to be clever and handling all 15 * Instead of trying to be clever and handling all
15 * of the cases, just fix things up simply here. 16 * of the cases, just fix things up simply here.
16 */ 17 */
17 18
18unsigned long copy_from_user_fixup(void *to, const void __user *from, unsigned long size) 19static unsigned long compute_size(unsigned long start, unsigned long size, unsigned long *offset)
19{ 20{
20 char *dst = to; 21 unsigned long fault_addr = current_thread_info()->fault_address;
21 const char __user *src = from; 22 unsigned long end = start + size;
22 23
23 while (size) { 24 if (fault_addr < start || fault_addr >= end) {
24 if (__get_user(*dst, src)) 25 *offset = 0;
25 break; 26 } else {
26 dst++; 27 *offset = start - fault_addr;
27 src++; 28 size = end - fault_addr;
28 size--;
29 } 29 }
30 return size;
31}
30 32
31 if (size) 33unsigned long copy_from_user_fixup(void *to, const void __user *from, unsigned long size)
32 memset(dst, 0, size); 34{
35 unsigned long offset;
36
37 size = compute_size((unsigned long) from, size, &offset);
38 if (likely(size))
39 memset(to + offset, 0, size);
33 40
34 return size; 41 return size;
35} 42}
36 43
37unsigned long copy_to_user_fixup(void __user *to, const void *from, unsigned long size) 44unsigned long copy_to_user_fixup(void __user *to, const void *from, unsigned long size)
38{ 45{
39 char __user *dst = to; 46 unsigned long offset;
40 const char *src = from;
41
42 while (size) {
43 if (__put_user(*src, dst))
44 break;
45 dst++;
46 src++;
47 size--;
48 }
49 47
50 return size; 48 return compute_size((unsigned long) to, size, &offset);
51} 49}
52 50
53unsigned long copy_in_user_fixup(void __user *to, void __user *from, unsigned long size) 51unsigned long copy_in_user_fixup(void __user *to, void __user *from, unsigned long size)
54{ 52{
55 char __user *dst = to; 53 unsigned long fault_addr = current_thread_info()->fault_address;
56 char __user *src = from; 54 unsigned long start = (unsigned long) to;
55 unsigned long end = start + size;
57 56
58 while (size) { 57 if (fault_addr >= start && fault_addr < end)
59 char tmp; 58 return end - fault_addr;
60 59
61 if (__get_user(tmp, src)) 60 start = (unsigned long) from;
62 break; 61 end = start + size;
63 if (__put_user(tmp, dst)) 62 if (fault_addr >= start && fault_addr < end)
64 break; 63 return end - fault_addr;
65 dst++;
66 src++;
67 size--;
68 }
69 64
70 return size; 65 return size;
71} 66}
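The rewritten fixup helpers above no longer replay the copy byte by byte: the page-fault handler records the faulting user address in thread_info, and the fixup only computes how much of the buffer lies at or beyond that address. A self-contained sketch of that arithmetic (simplified; the real compute_size() also hands back an offset so copy_from_user_fixup() can zero the uncopied tail of the destination):

/*
 * Sketch: given the faulting address recorded by the fault handler,
 * report how many bytes of [start, start + len) were left uncopied.
 * This mirrors the intent of compute_size() above in simplified form.
 */
static unsigned long bytes_not_copied(unsigned long start, unsigned long len,
				      unsigned long fault_addr)
{
	unsigned long end = start + len;

	if (fault_addr < start || fault_addr >= end)
		return len;		/* fault elsewhere: report nothing copied */
	return end - fault_addr;	/* bytes from the fault to the end */
}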
diff --git a/arch/sparc64/mm/Makefile b/arch/sparc64/mm/Makefile
index cda87333a77b..9d0960e69f48 100644
--- a/arch/sparc64/mm/Makefile
+++ b/arch/sparc64/mm/Makefile
@@ -5,6 +5,6 @@
5EXTRA_AFLAGS := -ansi 5EXTRA_AFLAGS := -ansi
6EXTRA_CFLAGS := -Werror 6EXTRA_CFLAGS := -Werror
7 7
8obj-y := ultra.o tlb.o fault.o init.o generic.o extable.o 8obj-y := ultra.o tlb.o fault.o init.o generic.o
9 9
10obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o 10obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
diff --git a/arch/sparc64/mm/extable.c b/arch/sparc64/mm/extable.c
deleted file mode 100644
index ec334297ff4f..000000000000
--- a/arch/sparc64/mm/extable.c
+++ /dev/null
@@ -1,80 +0,0 @@
1/*
2 * linux/arch/sparc64/mm/extable.c
3 */
4
5#include <linux/config.h>
6#include <linux/module.h>
7#include <asm/uaccess.h>
8
9extern const struct exception_table_entry __start___ex_table[];
10extern const struct exception_table_entry __stop___ex_table[];
11
12void sort_extable(struct exception_table_entry *start,
13 struct exception_table_entry *finish)
14{
15}
16
17/* Caller knows they are in a range if ret->fixup == 0 */
18const struct exception_table_entry *
19search_extable(const struct exception_table_entry *start,
20 const struct exception_table_entry *last,
21 unsigned long value)
22{
23 const struct exception_table_entry *walk;
24
25 /* Single insn entries are encoded as:
26 * word 1: insn address
27 * word 2: fixup code address
28 *
29 * Range entries are encoded as:
30 * word 1: first insn address
31 * word 2: 0
32 * word 3: last insn address + 4 bytes
33 * word 4: fixup code address
34 *
35 * See asm/uaccess.h for more details.
36 */
37
38 /* 1. Try to find an exact match. */
39 for (walk = start; walk <= last; walk++) {
40 if (walk->fixup == 0) {
41 /* A range entry, skip both parts. */
42 walk++;
43 continue;
44 }
45
46 if (walk->insn == value)
47 return walk;
48 }
49
50 /* 2. Try to find a range match. */
51 for (walk = start; walk <= (last - 1); walk++) {
52 if (walk->fixup)
53 continue;
54
55 if (walk[0].insn <= value && walk[1].insn > value)
56 return walk;
57
58 walk++;
59 }
60
61 return NULL;
62}
63
64/* Special extable search, which handles ranges. Returns fixup */
65unsigned long search_extables_range(unsigned long addr, unsigned long *g2)
66{
67 const struct exception_table_entry *entry;
68
69 entry = search_exception_tables(addr);
70 if (!entry)
71 return 0;
72
73 /* Inside range? Fix g2 and return correct fixup */
74 if (!entry->fixup) {
75 *g2 = (addr - entry->insn) / 4;
76 return (entry + 1)->fixup;
77 }
78
79 return entry->fixup;
80}
diff --git a/arch/sparc64/mm/fault.c b/arch/sparc64/mm/fault.c
index db1e3310e907..31fbc67719a1 100644
--- a/arch/sparc64/mm/fault.c
+++ b/arch/sparc64/mm/fault.c
@@ -32,8 +32,6 @@
32 32
33#define ELEMENTS(arr) (sizeof (arr)/sizeof (arr[0])) 33#define ELEMENTS(arr) (sizeof (arr)/sizeof (arr[0]))
34 34
35extern struct sparc_phys_banks sp_banks[SPARC_PHYS_BANKS];
36
37/* 35/*
38 * To debug kernel to catch accesses to certain virtual/physical addresses. 36 * To debug kernel to catch accesses to certain virtual/physical addresses.
39 * Mode = 0 selects physical watchpoints, mode = 1 selects virtual watchpoints. 37 * Mode = 0 selects physical watchpoints, mode = 1 selects virtual watchpoints.
@@ -71,53 +69,6 @@ void set_brkpt(unsigned long addr, unsigned char mask, int flags, int mode)
71 : "memory"); 69 : "memory");
72} 70}
73 71
74/* Nice, simple, prom library does all the sweating for us. ;) */
75unsigned long __init prom_probe_memory (void)
76{
77 register struct linux_mlist_p1275 *mlist;
78 register unsigned long bytes, base_paddr, tally;
79 register int i;
80
81 i = 0;
82 mlist = *prom_meminfo()->p1275_available;
83 bytes = tally = mlist->num_bytes;
84 base_paddr = mlist->start_adr;
85
86 sp_banks[0].base_addr = base_paddr;
87 sp_banks[0].num_bytes = bytes;
88
89 while (mlist->theres_more != (void *) 0) {
90 i++;
91 mlist = mlist->theres_more;
92 bytes = mlist->num_bytes;
93 tally += bytes;
94 if (i >= SPARC_PHYS_BANKS-1) {
95 printk ("The machine has more banks than "
96 "this kernel can support\n"
97 "Increase the SPARC_PHYS_BANKS "
98 "setting (currently %d)\n",
99 SPARC_PHYS_BANKS);
100 i = SPARC_PHYS_BANKS-1;
101 break;
102 }
103
104 sp_banks[i].base_addr = mlist->start_adr;
105 sp_banks[i].num_bytes = mlist->num_bytes;
106 }
107
108 i++;
109 sp_banks[i].base_addr = 0xdeadbeefbeefdeadUL;
110 sp_banks[i].num_bytes = 0;
111
112 /* Now mask all bank sizes on a page boundary, it is all we can
113 * use anyways.
114 */
115 for (i = 0; sp_banks[i].num_bytes != 0; i++)
116 sp_banks[i].num_bytes &= PAGE_MASK;
117
118 return tally;
119}
120
121static void __kprobes unhandled_fault(unsigned long address, 72static void __kprobes unhandled_fault(unsigned long address,
122 struct task_struct *tsk, 73 struct task_struct *tsk,
123 struct pt_regs *regs) 74 struct pt_regs *regs)
@@ -242,7 +193,6 @@ static unsigned int get_fault_insn(struct pt_regs *regs, unsigned int insn)
242static void do_kernel_fault(struct pt_regs *regs, int si_code, int fault_code, 193static void do_kernel_fault(struct pt_regs *regs, int si_code, int fault_code,
243 unsigned int insn, unsigned long address) 194 unsigned int insn, unsigned long address)
244{ 195{
245 unsigned long g2;
246 unsigned char asi = ASI_P; 196 unsigned char asi = ASI_P;
247 197
248 if ((!insn) && (regs->tstate & TSTATE_PRIV)) 198 if ((!insn) && (regs->tstate & TSTATE_PRIV))
@@ -273,11 +223,9 @@ static void do_kernel_fault(struct pt_regs *regs, int si_code, int fault_code,
273 } 223 }
274 } 224 }
275 225
276 g2 = regs->u_regs[UREG_G2];
277
278 /* Is this in ex_table? */ 226 /* Is this in ex_table? */
279 if (regs->tstate & TSTATE_PRIV) { 227 if (regs->tstate & TSTATE_PRIV) {
280 unsigned long fixup; 228 const struct exception_table_entry *entry;
281 229
282 if (asi == ASI_P && (insn & 0xc0800000) == 0xc0800000) { 230 if (asi == ASI_P && (insn & 0xc0800000) == 0xc0800000) {
283 if (insn & 0x2000) 231 if (insn & 0x2000)
@@ -288,10 +236,9 @@ static void do_kernel_fault(struct pt_regs *regs, int si_code, int fault_code,
288 236
289 /* Look in asi.h: All _S asis have LS bit set */ 237 /* Look in asi.h: All _S asis have LS bit set */
290 if ((asi & 0x1) && 238 if ((asi & 0x1) &&
291 (fixup = search_extables_range(regs->tpc, &g2))) { 239 (entry = search_exception_tables(regs->tpc))) {
292 regs->tpc = fixup; 240 regs->tpc = entry->fixup;
293 regs->tnpc = regs->tpc + 4; 241 regs->tnpc = regs->tpc + 4;
294 regs->u_regs[UREG_G2] = g2;
295 return; 242 return;
296 } 243 }
297 } else { 244 } else {
@@ -461,7 +408,7 @@ good_area:
461 } 408 }
462 409
463 up_read(&mm->mmap_sem); 410 up_read(&mm->mmap_sem);
464 goto fault_done; 411 return;
465 412
466 /* 413 /*
467 * Something tried to access memory that isn't in our memory map.. 414 * Something tried to access memory that isn't in our memory map..
@@ -473,8 +420,7 @@ bad_area:
473 420
474handle_kernel_fault: 421handle_kernel_fault:
475 do_kernel_fault(regs, si_code, fault_code, insn, address); 422 do_kernel_fault(regs, si_code, fault_code, insn, address);
476 423 return;
477 goto fault_done;
478 424
479/* 425/*
480 * We ran out of memory, or some other thing happened to us that made 426 * We ran out of memory, or some other thing happened to us that made
@@ -505,9 +451,4 @@ do_sigbus:
505 /* Kernel mode? Handle exceptions or die */ 451 /* Kernel mode? Handle exceptions or die */
506 if (regs->tstate & TSTATE_PRIV) 452 if (regs->tstate & TSTATE_PRIV)
507 goto handle_kernel_fault; 453 goto handle_kernel_fault;
508
509fault_done:
510 /* These values are no longer needed, clear them. */
511 set_thread_fault_code(0);
512 current_thread_info()->fault_address = 0;
513} 454}
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index fdb1ebb308c9..5db50524f20d 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -20,6 +20,8 @@
20#include <linux/fs.h> 20#include <linux/fs.h>
21#include <linux/seq_file.h> 21#include <linux/seq_file.h>
22#include <linux/kprobes.h> 22#include <linux/kprobes.h>
23#include <linux/cache.h>
24#include <linux/sort.h>
23 25
24#include <asm/head.h> 26#include <asm/head.h>
25#include <asm/system.h> 27#include <asm/system.h>
@@ -40,24 +42,80 @@
40 42
41extern void device_scan(void); 43extern void device_scan(void);
42 44
43struct sparc_phys_banks sp_banks[SPARC_PHYS_BANKS]; 45#define MAX_BANKS 32
44 46
45unsigned long *sparc64_valid_addr_bitmap; 47static struct linux_prom64_registers pavail[MAX_BANKS] __initdata;
48static struct linux_prom64_registers pavail_rescan[MAX_BANKS] __initdata;
49static int pavail_ents __initdata;
50static int pavail_rescan_ents __initdata;
51
52static int cmp_p64(const void *a, const void *b)
53{
54 const struct linux_prom64_registers *x = a, *y = b;
55
56 if (x->phys_addr > y->phys_addr)
57 return 1;
58 if (x->phys_addr < y->phys_addr)
59 return -1;
60 return 0;
61}
62
63static void __init read_obp_memory(const char *property,
64 struct linux_prom64_registers *regs,
65 int *num_ents)
66{
67 int node = prom_finddevice("/memory");
68 int prop_size = prom_getproplen(node, property);
69 int ents, ret, i;
70
71 ents = prop_size / sizeof(struct linux_prom64_registers);
72 if (ents > MAX_BANKS) {
73 prom_printf("The machine has more %s property entries than "
74 "this kernel can support (%d).\n",
75 property, MAX_BANKS);
76 prom_halt();
77 }
78
79 ret = prom_getproperty(node, property, (char *) regs, prop_size);
80 if (ret == -1) {
81 prom_printf("Couldn't get %s property from /memory.\n");
82 prom_halt();
83 }
84
85 *num_ents = ents;
86
87 /* Sanitize what we got from the firmware, by page aligning
88 * everything.
89 */
90 for (i = 0; i < ents; i++) {
91 unsigned long base, size;
92
93 base = regs[i].phys_addr;
94 size = regs[i].reg_size;
95
96 size &= PAGE_MASK;
97 if (base & ~PAGE_MASK) {
98 unsigned long new_base = PAGE_ALIGN(base);
99
100 size -= new_base - base;
101 if ((long) size < 0L)
102 size = 0UL;
103 base = new_base;
104 }
105 regs[i].phys_addr = base;
106 regs[i].reg_size = size;
107 }
108 sort(regs, ents, sizeof(struct linux_prom64_registers),
109 cmp_p64, NULL);
110}
111
112unsigned long *sparc64_valid_addr_bitmap __read_mostly;
46 113
47/* Ugly, but necessary... -DaveM */ 114/* Ugly, but necessary... -DaveM */
48unsigned long phys_base; 115unsigned long phys_base __read_mostly;
49unsigned long kern_base; 116unsigned long kern_base __read_mostly;
50unsigned long kern_size; 117unsigned long kern_size __read_mostly;
51unsigned long pfn_base; 118unsigned long pfn_base __read_mostly;
52
53/* This is even uglier. We have a problem where the kernel may not be
54 * located at phys_base. However, initial __alloc_bootmem() calls need to
55 * be adjusted to be within the 4-8Megs that the kernel is mapped to, else
56 * those page mappings wont work. Things are ok after inherit_prom_mappings
57 * is called though. Dave says he'll clean this up some other time.
58 * -- BenC
59 */
60static unsigned long bootmap_base;
61 119
62/* get_new_mmu_context() uses "cache + 1". */ 120/* get_new_mmu_context() uses "cache + 1". */
63DEFINE_SPINLOCK(ctx_alloc_lock); 121DEFINE_SPINLOCK(ctx_alloc_lock);
@@ -73,7 +131,7 @@ extern unsigned long sparc_ramdisk_image64;
73extern unsigned int sparc_ramdisk_image; 131extern unsigned int sparc_ramdisk_image;
74extern unsigned int sparc_ramdisk_size; 132extern unsigned int sparc_ramdisk_size;
75 133
76struct page *mem_map_zero; 134struct page *mem_map_zero __read_mostly;
77 135
78int bigkernel = 0; 136int bigkernel = 0;
79 137
@@ -179,8 +237,6 @@ static __inline__ void clear_dcache_dirty_cpu(struct page *page, unsigned long c
179 : "g1", "g7"); 237 : "g1", "g7");
180} 238}
181 239
182extern void __update_mmu_cache(unsigned long mmu_context_hw, unsigned long address, pte_t pte, int code);
183
184void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte) 240void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
185{ 241{
186 struct page *page; 242 struct page *page;
@@ -207,10 +263,6 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t p
207 263
208 put_cpu(); 264 put_cpu();
209 } 265 }
210
211 if (get_thread_fault_code())
212 __update_mmu_cache(CTX_NRBITS(vma->vm_mm->context),
213 address, pte, get_thread_fault_code());
214} 266}
215 267
216void flush_dcache_page(struct page *page) 268void flush_dcache_page(struct page *page)
@@ -309,6 +361,7 @@ struct linux_prom_translation {
309 unsigned long size; 361 unsigned long size;
310 unsigned long data; 362 unsigned long data;
311}; 363};
364static struct linux_prom_translation prom_trans[512] __initdata;
312 365
313extern unsigned long prom_boot_page; 366extern unsigned long prom_boot_page;
314extern void prom_remap(unsigned long physpage, unsigned long virtpage, int mmu_ihandle); 367extern void prom_remap(unsigned long physpage, unsigned long virtpage, int mmu_ihandle);
@@ -318,14 +371,63 @@ extern void register_prom_callbacks(void);
318/* Exported for SMP bootup purposes. */ 371/* Exported for SMP bootup purposes. */
319unsigned long kern_locked_tte_data; 372unsigned long kern_locked_tte_data;
320 373
321void __init early_pgtable_allocfail(char *type) 374/* Exported for kernel TLB miss handling in ktlb.S */
375unsigned long prom_pmd_phys __read_mostly;
376unsigned int swapper_pgd_zero __read_mostly;
377
378/* Allocate power-of-2 aligned chunks from the end of the
379 * kernel image. Return physical address.
380 */
381static inline unsigned long early_alloc_phys(unsigned long size)
322{ 382{
323 prom_printf("inherit_prom_mappings: Cannot alloc kernel %s.\n", type); 383 unsigned long base;
324 prom_halt(); 384
385 BUILD_BUG_ON(size & (size - 1));
386
387 kern_size = (kern_size + (size - 1)) & ~(size - 1);
388 base = kern_base + kern_size;
389 kern_size += size;
390
391 return base;
392}
393
394static inline unsigned long load_phys32(unsigned long pa)
395{
396 unsigned long val;
397
398 __asm__ __volatile__("lduwa [%1] %2, %0"
399 : "=&r" (val)
400 : "r" (pa), "i" (ASI_PHYS_USE_EC));
401
402 return val;
403}
404
405static inline unsigned long load_phys64(unsigned long pa)
406{
407 unsigned long val;
408
409 __asm__ __volatile__("ldxa [%1] %2, %0"
410 : "=&r" (val)
411 : "r" (pa), "i" (ASI_PHYS_USE_EC));
412
413 return val;
414}
415
416static inline void store_phys32(unsigned long pa, unsigned long val)
417{
418 __asm__ __volatile__("stwa %0, [%1] %2"
419 : /* no outputs */
420 : "r" (val), "r" (pa), "i" (ASI_PHYS_USE_EC));
421}
422
423static inline void store_phys64(unsigned long pa, unsigned long val)
424{
425 __asm__ __volatile__("stxa %0, [%1] %2"
426 : /* no outputs */
427 : "r" (val), "r" (pa), "i" (ASI_PHYS_USE_EC));
325} 428}
326 429
327#define BASE_PAGE_SIZE 8192 430#define BASE_PAGE_SIZE 8192
328static pmd_t *prompmd;
329 431
330/* 432/*
331 * Translate PROM's mapping we capture at boot time into physical address. 433 * Translate PROM's mapping we capture at boot time into physical address.
@@ -333,278 +435,172 @@ static pmd_t *prompmd;
333 */ 435 */
334unsigned long prom_virt_to_phys(unsigned long promva, int *error) 436unsigned long prom_virt_to_phys(unsigned long promva, int *error)
335{ 437{
336 pmd_t *pmdp = prompmd + ((promva >> 23) & 0x7ff); 438 unsigned long pmd_phys = (prom_pmd_phys +
337 pte_t *ptep; 439 ((promva >> 23) & 0x7ff) * sizeof(pmd_t));
440 unsigned long pte_phys;
441 pmd_t pmd_ent;
442 pte_t pte_ent;
338 unsigned long base; 443 unsigned long base;
339 444
340 if (pmd_none(*pmdp)) { 445 pmd_val(pmd_ent) = load_phys32(pmd_phys);
446 if (pmd_none(pmd_ent)) {
341 if (error) 447 if (error)
342 *error = 1; 448 *error = 1;
343 return(0); 449 return 0;
344 } 450 }
345 ptep = (pte_t *)__pmd_page(*pmdp) + ((promva >> 13) & 0x3ff); 451
346 if (!pte_present(*ptep)) { 452 pte_phys = (unsigned long)pmd_val(pmd_ent) << 11UL;
453 pte_phys += ((promva >> 13) & 0x3ff) * sizeof(pte_t);
454 pte_val(pte_ent) = load_phys64(pte_phys);
455 if (!pte_present(pte_ent)) {
347 if (error) 456 if (error)
348 *error = 1; 457 *error = 1;
349 return(0); 458 return 0;
350 } 459 }
351 if (error) { 460 if (error) {
352 *error = 0; 461 *error = 0;
353 return(pte_val(*ptep)); 462 return pte_val(pte_ent);
354 } 463 }
355 base = pte_val(*ptep) & _PAGE_PADDR; 464 base = pte_val(pte_ent) & _PAGE_PADDR;
356 return(base + (promva & (BASE_PAGE_SIZE - 1))); 465 return (base + (promva & (BASE_PAGE_SIZE - 1)));
357} 466}
358 467
359static void inherit_prom_mappings(void) 468/* The obp translations are saved based on 8k pagesize, since obp can
469 * use a mixture of pagesizes. Misses to the LOW_OBP_ADDRESS ->
470 * HI_OBP_ADDRESS range are handled in entry.S and do not use the vpte
471 * scheme (also, see rant in inherit_locked_prom_mappings()).
472 */
473static void __init build_obp_range(unsigned long start, unsigned long end, unsigned long data)
360{ 474{
361 struct linux_prom_translation *trans; 475 unsigned long vaddr;
362 unsigned long phys_page, tte_vaddr, tte_data;
363 void (*remap_func)(unsigned long, unsigned long, int);
364 pmd_t *pmdp;
365 pte_t *ptep;
366 int node, n, i, tsz;
367 extern unsigned int obp_iaddr_patch[2], obp_daddr_patch[2];
368 476
369 node = prom_finddevice("/virtual-memory"); 477 for (vaddr = start; vaddr < end; vaddr += BASE_PAGE_SIZE) {
370 n = prom_getproplen(node, "translations"); 478 unsigned long val, pte_phys, pmd_phys;
371 if (n == 0 || n == -1) { 479 pmd_t pmd_ent;
372 prom_printf("Couldn't get translation property\n"); 480 int i;
373 prom_halt();
374 }
375 n += 5 * sizeof(struct linux_prom_translation);
376 for (tsz = 1; tsz < n; tsz <<= 1)
377 /* empty */;
378 trans = __alloc_bootmem(tsz, SMP_CACHE_BYTES, bootmap_base);
379 if (trans == NULL) {
380 prom_printf("inherit_prom_mappings: Cannot alloc translations.\n");
381 prom_halt();
382 }
383 memset(trans, 0, tsz);
384 481
385 if ((n = prom_getproperty(node, "translations", (char *)trans, tsz)) == -1) { 482 pmd_phys = (prom_pmd_phys +
386 prom_printf("Couldn't get translation property\n"); 483 (((vaddr >> 23) & 0x7ff) * sizeof(pmd_t)));
387 prom_halt(); 484 pmd_val(pmd_ent) = load_phys32(pmd_phys);
388 } 485 if (pmd_none(pmd_ent)) {
389 n = n / sizeof(*trans); 486 pte_phys = early_alloc_phys(BASE_PAGE_SIZE);
390 487
391 /* 488 for (i = 0; i < BASE_PAGE_SIZE / sizeof(pte_t); i++)
392 * The obp translations are saved based on 8k pagesize, since obp can 489 store_phys64(pte_phys+i*sizeof(pte_t),0);
393 * use a mixture of pagesizes. Misses to the 0xf0000000 - 0x100000000,
394 * ie obp range, are handled in entry.S and do not use the vpte scheme
395 * (see rant in inherit_locked_prom_mappings()).
396 */
397#define OBP_PMD_SIZE 2048
398 prompmd = __alloc_bootmem(OBP_PMD_SIZE, OBP_PMD_SIZE, bootmap_base);
399 if (prompmd == NULL)
400 early_pgtable_allocfail("pmd");
401 memset(prompmd, 0, OBP_PMD_SIZE);
402 for (i = 0; i < n; i++) {
403 unsigned long vaddr;
404
405 if (trans[i].virt >= LOW_OBP_ADDRESS && trans[i].virt < HI_OBP_ADDRESS) {
406 for (vaddr = trans[i].virt;
407 ((vaddr < trans[i].virt + trans[i].size) &&
408 (vaddr < HI_OBP_ADDRESS));
409 vaddr += BASE_PAGE_SIZE) {
410 unsigned long val;
411
412 pmdp = prompmd + ((vaddr >> 23) & 0x7ff);
413 if (pmd_none(*pmdp)) {
414 ptep = __alloc_bootmem(BASE_PAGE_SIZE,
415 BASE_PAGE_SIZE,
416 bootmap_base);
417 if (ptep == NULL)
418 early_pgtable_allocfail("pte");
419 memset(ptep, 0, BASE_PAGE_SIZE);
420 pmd_set(pmdp, ptep);
421 }
422 ptep = (pte_t *)__pmd_page(*pmdp) +
423 ((vaddr >> 13) & 0x3ff);
424 490
425 val = trans[i].data; 491 pmd_val(pmd_ent) = pte_phys >> 11UL;
492 store_phys32(pmd_phys, pmd_val(pmd_ent));
493 }
426 494
427 /* Clear diag TTE bits. */ 495 pte_phys = (unsigned long)pmd_val(pmd_ent) << 11UL;
428 if (tlb_type == spitfire) 496 pte_phys += (((vaddr >> 13) & 0x3ff) * sizeof(pte_t));
429 val &= ~0x0003fe0000000000UL;
430 497
431 set_pte_at(&init_mm, vaddr, 498 val = data;
432 ptep, __pte(val | _PAGE_MODIFIED));
433 trans[i].data += BASE_PAGE_SIZE;
434 }
435 }
436 }
437 phys_page = __pa(prompmd);
438 obp_iaddr_patch[0] |= (phys_page >> 10);
439 obp_iaddr_patch[1] |= (phys_page & 0x3ff);
440 flushi((long)&obp_iaddr_patch[0]);
441 obp_daddr_patch[0] |= (phys_page >> 10);
442 obp_daddr_patch[1] |= (phys_page & 0x3ff);
443 flushi((long)&obp_daddr_patch[0]);
444 499
445 /* Now fixup OBP's idea about where we really are mapped. */ 500 /* Clear diag TTE bits. */
446 prom_printf("Remapping the kernel... "); 501 if (tlb_type == spitfire)
502 val &= ~0x0003fe0000000000UL;
447 503
448 /* Spitfire Errata #32 workaround */ 504 store_phys64(pte_phys, val | _PAGE_MODIFIED);
449 /* NOTE: Using plain zero for the context value is
450 * correct here, we are not using the Linux trap
451 * tables yet so we should not use the special
452 * UltraSPARC-III+ page size encodings yet.
453 */
454 __asm__ __volatile__("stxa %0, [%1] %2\n\t"
455 "flush %%g6"
456 : /* No outputs */
457 : "r" (0), "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
458
459 switch (tlb_type) {
460 default:
461 case spitfire:
462 phys_page = spitfire_get_dtlb_data(sparc64_highest_locked_tlbent());
463 break;
464
465 case cheetah:
466 case cheetah_plus:
467 phys_page = cheetah_get_litlb_data(sparc64_highest_locked_tlbent());
468 break;
469 };
470
471 phys_page &= _PAGE_PADDR;
472 phys_page += ((unsigned long)&prom_boot_page -
473 (unsigned long)KERNBASE);
474 505
475 if (tlb_type == spitfire) { 506 data += BASE_PAGE_SIZE;
476 /* Lock this into i/d tlb entry 59 */
477 __asm__ __volatile__(
478 "stxa %%g0, [%2] %3\n\t"
479 "stxa %0, [%1] %4\n\t"
480 "membar #Sync\n\t"
481 "flush %%g6\n\t"
482 "stxa %%g0, [%2] %5\n\t"
483 "stxa %0, [%1] %6\n\t"
484 "membar #Sync\n\t"
485 "flush %%g6"
486 : : "r" (phys_page | _PAGE_VALID | _PAGE_SZ8K | _PAGE_CP |
487 _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W),
488 "r" (59 << 3), "r" (TLB_TAG_ACCESS),
489 "i" (ASI_DMMU), "i" (ASI_DTLB_DATA_ACCESS),
490 "i" (ASI_IMMU), "i" (ASI_ITLB_DATA_ACCESS)
491 : "memory");
492 } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
493 /* Lock this into i/d tlb-0 entry 11 */
494 __asm__ __volatile__(
495 "stxa %%g0, [%2] %3\n\t"
496 "stxa %0, [%1] %4\n\t"
497 "membar #Sync\n\t"
498 "flush %%g6\n\t"
499 "stxa %%g0, [%2] %5\n\t"
500 "stxa %0, [%1] %6\n\t"
501 "membar #Sync\n\t"
502 "flush %%g6"
503 : : "r" (phys_page | _PAGE_VALID | _PAGE_SZ8K | _PAGE_CP |
504 _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W),
505 "r" ((0 << 16) | (11 << 3)), "r" (TLB_TAG_ACCESS),
506 "i" (ASI_DMMU), "i" (ASI_DTLB_DATA_ACCESS),
507 "i" (ASI_IMMU), "i" (ASI_ITLB_DATA_ACCESS)
508 : "memory");
509 } else {
510 /* Implement me :-) */
511 BUG();
512 } 507 }
508}
513 509
514 tte_vaddr = (unsigned long) KERNBASE; 510static inline int in_obp_range(unsigned long vaddr)
511{
512 return (vaddr >= LOW_OBP_ADDRESS &&
513 vaddr < HI_OBP_ADDRESS);
514}
515 515
516 /* Spitfire Errata #32 workaround */ 516#define OBP_PMD_SIZE 2048
517 /* NOTE: Using plain zero for the context value is 517static void __init build_obp_pgtable(int prom_trans_ents)
518 * correct here, we are not using the Linux trap 518{
519 * tables yet so we should not use the special 519 unsigned long i;
520 * UltraSPARC-III+ page size encodings yet.
521 */
522 __asm__ __volatile__("stxa %0, [%1] %2\n\t"
523 "flush %%g6"
524 : /* No outputs */
525 : "r" (0),
526 "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
527
528 if (tlb_type == spitfire)
529 tte_data = spitfire_get_dtlb_data(sparc64_highest_locked_tlbent());
530 else
531 tte_data = cheetah_get_ldtlb_data(sparc64_highest_locked_tlbent());
532 520
533 kern_locked_tte_data = tte_data; 521 prom_pmd_phys = early_alloc_phys(OBP_PMD_SIZE);
522 for (i = 0; i < OBP_PMD_SIZE; i += 4)
523 store_phys32(prom_pmd_phys + i, 0);
534 524
535 remap_func = (void *) ((unsigned long) &prom_remap - 525 for (i = 0; i < prom_trans_ents; i++) {
536 (unsigned long) &prom_boot_page); 526 unsigned long start, end;
537 527
528 if (!in_obp_range(prom_trans[i].virt))
529 continue;
538 530
539 /* Spitfire Errata #32 workaround */ 531 start = prom_trans[i].virt;
540 /* NOTE: Using plain zero for the context value is 532 end = start + prom_trans[i].size;
541 * correct here, we are not using the Linux trap 533 if (end > HI_OBP_ADDRESS)
542 * tables yet so we should not use the special 534 end = HI_OBP_ADDRESS;
543 * UltraSPARC-III+ page size encodings yet. 535
544 */ 536 build_obp_range(start, end, prom_trans[i].data);
545 __asm__ __volatile__("stxa %0, [%1] %2\n\t"
546 "flush %%g6"
547 : /* No outputs */
548 : "r" (0),
549 "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
550
551 remap_func((tlb_type == spitfire ?
552 (spitfire_get_dtlb_data(sparc64_highest_locked_tlbent()) & _PAGE_PADDR) :
553 (cheetah_get_litlb_data(sparc64_highest_locked_tlbent()) & _PAGE_PADDR)),
554 (unsigned long) KERNBASE,
555 prom_get_mmu_ihandle());
556
557 if (bigkernel)
558 remap_func(((tte_data + 0x400000) & _PAGE_PADDR),
559 (unsigned long) KERNBASE + 0x400000, prom_get_mmu_ihandle());
560
561 /* Flush out that temporary mapping. */
562 spitfire_flush_dtlb_nucleus_page(0x0);
563 spitfire_flush_itlb_nucleus_page(0x0);
564
565 /* Now lock us back into the TLBs via OBP. */
566 prom_dtlb_load(sparc64_highest_locked_tlbent(), tte_data, tte_vaddr);
567 prom_itlb_load(sparc64_highest_locked_tlbent(), tte_data, tte_vaddr);
568 if (bigkernel) {
569 prom_dtlb_load(sparc64_highest_locked_tlbent()-1, tte_data + 0x400000,
570 tte_vaddr + 0x400000);
571 prom_itlb_load(sparc64_highest_locked_tlbent()-1, tte_data + 0x400000,
572 tte_vaddr + 0x400000);
573 } 537 }
538}
539
540/* Read OBP translations property into 'prom_trans[]'.
541 * Return the number of entries.
542 */
543static int __init read_obp_translations(void)
544{
545 int n, node;
574 546
575 /* Re-read translations property. */ 547 node = prom_finddevice("/virtual-memory");
576 if ((n = prom_getproperty(node, "translations", (char *)trans, tsz)) == -1) { 548 n = prom_getproplen(node, "translations");
577 prom_printf("Couldn't get translation property\n"); 549 if (unlikely(n == 0 || n == -1)) {
550 prom_printf("prom_mappings: Couldn't get size.\n");
551 prom_halt();
552 }
553 if (unlikely(n > sizeof(prom_trans))) {
554 prom_printf("prom_mappings: Size %Zd is too big.\n", n);
578 prom_halt(); 555 prom_halt();
579 } 556 }
580 n = n / sizeof(*trans);
581 557
582 for (i = 0; i < n; i++) { 558 if ((n = prom_getproperty(node, "translations",
583 unsigned long vaddr = trans[i].virt; 559 (char *)&prom_trans[0],
584 unsigned long size = trans[i].size; 560 sizeof(prom_trans))) == -1) {
561 prom_printf("prom_mappings: Couldn't get property.\n");
562 prom_halt();
563 }
564 n = n / sizeof(struct linux_prom_translation);
565 return n;
566}
585 567
586 if (vaddr < 0xf0000000UL) { 568static void __init remap_kernel(void)
587 unsigned long avoid_start = (unsigned long) KERNBASE; 569{
588 unsigned long avoid_end = avoid_start + (4 * 1024 * 1024); 570 unsigned long phys_page, tte_vaddr, tte_data;
571 int tlb_ent = sparc64_highest_locked_tlbent();
589 572
590 if (bigkernel) 573 tte_vaddr = (unsigned long) KERNBASE;
591 avoid_end += (4 * 1024 * 1024); 574 phys_page = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
592 if (vaddr < avoid_start) { 575 tte_data = (phys_page | (_PAGE_VALID | _PAGE_SZ4MB |
593 unsigned long top = vaddr + size; 576 _PAGE_CP | _PAGE_CV | _PAGE_P |
577 _PAGE_L | _PAGE_W));
594 578
595 if (top > avoid_start) 579 kern_locked_tte_data = tte_data;
596 top = avoid_start;
597 prom_unmap(top - vaddr, vaddr);
598 }
599 if ((vaddr + size) > avoid_end) {
600 unsigned long bottom = vaddr;
601 580
602 if (bottom < avoid_end) 581 /* Now lock us into the TLBs via OBP. */
603 bottom = avoid_end; 582 prom_dtlb_load(tlb_ent, tte_data, tte_vaddr);
604 prom_unmap((vaddr + size) - bottom, bottom); 583 prom_itlb_load(tlb_ent, tte_data, tte_vaddr);
605 } 584 if (bigkernel) {
606 } 585 prom_dtlb_load(tlb_ent - 1,
586 tte_data + 0x400000,
587 tte_vaddr + 0x400000);
588 prom_itlb_load(tlb_ent - 1,
589 tte_data + 0x400000,
590 tte_vaddr + 0x400000);
607 } 591 }
592}
593
594static void __init inherit_prom_mappings(void)
595{
596 int n;
597
598 n = read_obp_translations();
599 build_obp_pgtable(n);
600
601 /* Now fixup OBP's idea about where we really are mapped. */
602 prom_printf("Remapping the kernel... ");
603 remap_kernel();
608 604
609 prom_printf("done.\n"); 605 prom_printf("done.\n");
610 606
@@ -1276,14 +1272,14 @@ unsigned long __init bootmem_init(unsigned long *pages_avail)
1276 int i; 1272 int i;
1277 1273
1278#ifdef CONFIG_DEBUG_BOOTMEM 1274#ifdef CONFIG_DEBUG_BOOTMEM
1279 prom_printf("bootmem_init: Scan sp_banks, "); 1275 prom_printf("bootmem_init: Scan pavail, ");
1280#endif 1276#endif
1281 1277
1282 bytes_avail = 0UL; 1278 bytes_avail = 0UL;
1283 for (i = 0; sp_banks[i].num_bytes != 0; i++) { 1279 for (i = 0; i < pavail_ents; i++) {
1284 end_of_phys_memory = sp_banks[i].base_addr + 1280 end_of_phys_memory = pavail[i].phys_addr +
1285 sp_banks[i].num_bytes; 1281 pavail[i].reg_size;
1286 bytes_avail += sp_banks[i].num_bytes; 1282 bytes_avail += pavail[i].reg_size;
1287 if (cmdline_memory_size) { 1283 if (cmdline_memory_size) {
1288 if (bytes_avail > cmdline_memory_size) { 1284 if (bytes_avail > cmdline_memory_size) {
1289 unsigned long slack = bytes_avail - cmdline_memory_size; 1285 unsigned long slack = bytes_avail - cmdline_memory_size;
@@ -1291,12 +1287,15 @@ unsigned long __init bootmem_init(unsigned long *pages_avail)
1291 bytes_avail -= slack; 1287 bytes_avail -= slack;
1292 end_of_phys_memory -= slack; 1288 end_of_phys_memory -= slack;
1293 1289
1294 sp_banks[i].num_bytes -= slack; 1290 pavail[i].reg_size -= slack;
1295 if (sp_banks[i].num_bytes == 0) { 1291 if ((long)pavail[i].reg_size <= 0L) {
1296 sp_banks[i].base_addr = 0xdeadbeef; 1292 pavail[i].phys_addr = 0xdeadbeefUL;
1293 pavail[i].reg_size = 0UL;
1294 pavail_ents = i;
1297 } else { 1295 } else {
1298 sp_banks[i+1].num_bytes = 0; 1296 pavail[i+1].reg_size = 0Ul;
1299 sp_banks[i+1].base_addr = 0xdeadbeef; 1297 pavail[i+1].phys_addr = 0xdeadbeefUL;
1298 pavail_ents = i + 1;
1300 } 1299 }
1301 break; 1300 break;
1302 } 1301 }
@@ -1347,17 +1346,15 @@ unsigned long __init bootmem_init(unsigned long *pages_avail)
1347#endif 1346#endif
1348 bootmap_size = init_bootmem_node(NODE_DATA(0), bootmap_pfn, pfn_base, end_pfn); 1347 bootmap_size = init_bootmem_node(NODE_DATA(0), bootmap_pfn, pfn_base, end_pfn);
1349 1348
1350 bootmap_base = bootmap_pfn << PAGE_SHIFT;
1351
1352 /* Now register the available physical memory with the 1349 /* Now register the available physical memory with the
1353 * allocator. 1350 * allocator.
1354 */ 1351 */
1355 for (i = 0; sp_banks[i].num_bytes != 0; i++) { 1352 for (i = 0; i < pavail_ents; i++) {
1356#ifdef CONFIG_DEBUG_BOOTMEM 1353#ifdef CONFIG_DEBUG_BOOTMEM
1357 prom_printf("free_bootmem(sp_banks:%d): base[%lx] size[%lx]\n", 1354 prom_printf("free_bootmem(pavail:%d): base[%lx] size[%lx]\n",
1358 i, sp_banks[i].base_addr, sp_banks[i].num_bytes); 1355 i, pavail[i].phys_addr, pavail[i].reg_size);
1359#endif 1356#endif
1360 free_bootmem(sp_banks[i].base_addr, sp_banks[i].num_bytes); 1357 free_bootmem(pavail[i].phys_addr, pavail[i].reg_size);
1361 } 1358 }
1362 1359
1363#ifdef CONFIG_BLK_DEV_INITRD 1360#ifdef CONFIG_BLK_DEV_INITRD
@@ -1398,120 +1395,167 @@ unsigned long __init bootmem_init(unsigned long *pages_avail)
1398 return end_pfn; 1395 return end_pfn;
1399} 1396}
1400 1397
1398#ifdef CONFIG_DEBUG_PAGEALLOC
1399static unsigned long kernel_map_range(unsigned long pstart, unsigned long pend, pgprot_t prot)
1400{
1401 unsigned long vstart = PAGE_OFFSET + pstart;
1402 unsigned long vend = PAGE_OFFSET + pend;
1403 unsigned long alloc_bytes = 0UL;
1404
1405 if ((vstart & ~PAGE_MASK) || (vend & ~PAGE_MASK)) {
1406 prom_printf("kernel_map: Unaligned physmem[%lx:%lx]\n",
1407 vstart, vend);
1408 prom_halt();
1409 }
1410
1411 while (vstart < vend) {
1412 unsigned long this_end, paddr = __pa(vstart);
1413 pgd_t *pgd = pgd_offset_k(vstart);
1414 pud_t *pud;
1415 pmd_t *pmd;
1416 pte_t *pte;
1417
1418 pud = pud_offset(pgd, vstart);
1419 if (pud_none(*pud)) {
1420 pmd_t *new;
1421
1422 new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
1423 alloc_bytes += PAGE_SIZE;
1424 pud_populate(&init_mm, pud, new);
1425 }
1426
1427 pmd = pmd_offset(pud, vstart);
1428 if (!pmd_present(*pmd)) {
1429 pte_t *new;
1430
1431 new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
1432 alloc_bytes += PAGE_SIZE;
1433 pmd_populate_kernel(&init_mm, pmd, new);
1434 }
1435
1436 pte = pte_offset_kernel(pmd, vstart);
1437 this_end = (vstart + PMD_SIZE) & PMD_MASK;
1438 if (this_end > vend)
1439 this_end = vend;
1440
1441 while (vstart < this_end) {
1442 pte_val(*pte) = (paddr | pgprot_val(prot));
1443
1444 vstart += PAGE_SIZE;
1445 paddr += PAGE_SIZE;
1446 pte++;
1447 }
1448 }
1449
1450 return alloc_bytes;
1451}
1452
1453static struct linux_prom64_registers pall[MAX_BANKS] __initdata;
1454static int pall_ents __initdata;
1455
1456extern unsigned int kvmap_linear_patch[1];
1457
1458static void __init kernel_physical_mapping_init(void)
1459{
1460 unsigned long i, mem_alloced = 0UL;
1461
1462 read_obp_memory("reg", &pall[0], &pall_ents);
1463
1464 for (i = 0; i < pall_ents; i++) {
1465 unsigned long phys_start, phys_end;
1466
1467 phys_start = pall[i].phys_addr;
1468 phys_end = phys_start + pall[i].reg_size;
1469 mem_alloced += kernel_map_range(phys_start, phys_end,
1470 PAGE_KERNEL);
1471 }
1472
1473 printk("Allocated %ld bytes for kernel page tables.\n",
1474 mem_alloced);
1475
1476 kvmap_linear_patch[0] = 0x01000000; /* nop */
1477 flushi(&kvmap_linear_patch[0]);
1478
1479 __flush_tlb_all();
1480}
1481
1482void kernel_map_pages(struct page *page, int numpages, int enable)
1483{
1484 unsigned long phys_start = page_to_pfn(page) << PAGE_SHIFT;
1485 unsigned long phys_end = phys_start + (numpages * PAGE_SIZE);
1486
1487 kernel_map_range(phys_start, phys_end,
1488 (enable ? PAGE_KERNEL : __pgprot(0)));
1489
1490 /* we should perform an IPI and flush all tlbs,
1491 * but that can deadlock->flush only current cpu.
1492 */
1493 __flush_tlb_kernel_range(PAGE_OFFSET + phys_start,
1494 PAGE_OFFSET + phys_end);
1495}
1496#endif
1497
1498unsigned long __init find_ecache_flush_span(unsigned long size)
1499{
1500 int i;
1501
1502 for (i = 0; i < pavail_ents; i++) {
1503 if (pavail[i].reg_size >= size)
1504 return pavail[i].phys_addr;
1505 }
1506
1507 return ~0UL;
1508}
1509
1401/* paging_init() sets up the page tables */ 1510/* paging_init() sets up the page tables */
1402 1511
1403extern void cheetah_ecache_flush_init(void); 1512extern void cheetah_ecache_flush_init(void);
1404 1513
1405static unsigned long last_valid_pfn; 1514static unsigned long last_valid_pfn;
1515pgd_t swapper_pg_dir[2048];
1406 1516
1407void __init paging_init(void) 1517void __init paging_init(void)
1408{ 1518{
1409 extern pmd_t swapper_pmd_dir[1024]; 1519 unsigned long end_pfn, pages_avail, shift;
1410 extern unsigned int sparc64_vpte_patchme1[1]; 1520 unsigned long real_end, i;
1411 extern unsigned int sparc64_vpte_patchme2[1]; 1521
1412 unsigned long alias_base = kern_base + PAGE_OFFSET; 1522 /* Find available physical memory... */
1413 unsigned long second_alias_page = 0; 1523 read_obp_memory("available", &pavail[0], &pavail_ents);
1414 unsigned long pt, flags, end_pfn, pages_avail; 1524
1415 unsigned long shift = alias_base - ((unsigned long)KERNBASE); 1525 phys_base = 0xffffffffffffffffUL;
1416 unsigned long real_end; 1526 for (i = 0; i < pavail_ents; i++)
1527 phys_base = min(phys_base, pavail[i].phys_addr);
1528
1529 pfn_base = phys_base >> PAGE_SHIFT;
1530
1531 kern_base = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
1532 kern_size = (unsigned long)&_end - (unsigned long)KERNBASE;
1417 1533
1418 set_bit(0, mmu_context_bmap); 1534 set_bit(0, mmu_context_bmap);
1419 1535
1536 shift = kern_base + PAGE_OFFSET - ((unsigned long)KERNBASE);
1537
1420 real_end = (unsigned long)_end; 1538 real_end = (unsigned long)_end;
1421 if ((real_end > ((unsigned long)KERNBASE + 0x400000))) 1539 if ((real_end > ((unsigned long)KERNBASE + 0x400000)))
1422 bigkernel = 1; 1540 bigkernel = 1;
1423#ifdef CONFIG_BLK_DEV_INITRD 1541 if ((real_end > ((unsigned long)KERNBASE + 0x800000))) {
1424 if (sparc_ramdisk_image || sparc_ramdisk_image64) 1542 prom_printf("paging_init: Kernel > 8MB, too large.\n");
1425 real_end = (PAGE_ALIGN(real_end) + PAGE_ALIGN(sparc_ramdisk_size)); 1543 prom_halt();
1426#endif
1427
1428 /* We assume physical memory starts at some 4mb multiple,
1429 * if this were not true we wouldn't boot up to this point
1430 * anyways.
1431 */
1432 pt = kern_base | _PAGE_VALID | _PAGE_SZ4MB;
1433 pt |= _PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W;
1434 local_irq_save(flags);
1435 if (tlb_type == spitfire) {
1436 __asm__ __volatile__(
1437 " stxa %1, [%0] %3\n"
1438 " stxa %2, [%5] %4\n"
1439 " membar #Sync\n"
1440 " flush %%g6\n"
1441 " nop\n"
1442 " nop\n"
1443 " nop\n"
1444 : /* No outputs */
1445 : "r" (TLB_TAG_ACCESS), "r" (alias_base), "r" (pt),
1446 "i" (ASI_DMMU), "i" (ASI_DTLB_DATA_ACCESS), "r" (61 << 3)
1447 : "memory");
1448 if (real_end >= KERNBASE + 0x340000) {
1449 second_alias_page = alias_base + 0x400000;
1450 __asm__ __volatile__(
1451 " stxa %1, [%0] %3\n"
1452 " stxa %2, [%5] %4\n"
1453 " membar #Sync\n"
1454 " flush %%g6\n"
1455 " nop\n"
1456 " nop\n"
1457 " nop\n"
1458 : /* No outputs */
1459 : "r" (TLB_TAG_ACCESS), "r" (second_alias_page), "r" (pt + 0x400000),
1460 "i" (ASI_DMMU), "i" (ASI_DTLB_DATA_ACCESS), "r" (60 << 3)
1461 : "memory");
1462 }
1463 } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
1464 __asm__ __volatile__(
1465 " stxa %1, [%0] %3\n"
1466 " stxa %2, [%5] %4\n"
1467 " membar #Sync\n"
1468 " flush %%g6\n"
1469 " nop\n"
1470 " nop\n"
1471 " nop\n"
1472 : /* No outputs */
1473 : "r" (TLB_TAG_ACCESS), "r" (alias_base), "r" (pt),
1474 "i" (ASI_DMMU), "i" (ASI_DTLB_DATA_ACCESS), "r" ((0<<16) | (13<<3))
1475 : "memory");
1476 if (real_end >= KERNBASE + 0x340000) {
1477 second_alias_page = alias_base + 0x400000;
1478 __asm__ __volatile__(
1479 " stxa %1, [%0] %3\n"
1480 " stxa %2, [%5] %4\n"
1481 " membar #Sync\n"
1482 " flush %%g6\n"
1483 " nop\n"
1484 " nop\n"
1485 " nop\n"
1486 : /* No outputs */
1487 : "r" (TLB_TAG_ACCESS), "r" (second_alias_page), "r" (pt + 0x400000),
1488 "i" (ASI_DMMU), "i" (ASI_DTLB_DATA_ACCESS), "r" ((0<<16) | (12<<3))
1489 : "memory");
1490 }
1491 } 1544 }
1492 local_irq_restore(flags); 1545
1493 1546 /* Set kernel pgd to upper alias so physical page computations
1494 /* Now set kernel pgd to upper alias so physical page computations
1495 * work. 1547 * work.
1496 */ 1548 */
1497 init_mm.pgd += ((shift) / (sizeof(pgd_t))); 1549 init_mm.pgd += ((shift) / (sizeof(pgd_t)));
1498 1550
1499 memset(swapper_pmd_dir, 0, sizeof(swapper_pmd_dir)); 1551 memset(swapper_low_pmd_dir, 0, sizeof(swapper_low_pmd_dir));
1500 1552
1501 /* Now can init the kernel/bad page tables. */ 1553 /* Now can init the kernel/bad page tables. */
1502 pud_set(pud_offset(&swapper_pg_dir[0], 0), 1554 pud_set(pud_offset(&swapper_pg_dir[0], 0),
1503 swapper_pmd_dir + (shift / sizeof(pgd_t))); 1555 swapper_low_pmd_dir + (shift / sizeof(pgd_t)));
1504 1556
1505 sparc64_vpte_patchme1[0] |= 1557 swapper_pgd_zero = pgd_val(swapper_pg_dir[0]);
1506 (((unsigned long)pgd_val(init_mm.pgd[0])) >> 10);
1507 sparc64_vpte_patchme2[0] |=
1508 (((unsigned long)pgd_val(init_mm.pgd[0])) & 0x3ff);
1509 flushi((long)&sparc64_vpte_patchme1[0]);
1510 1558
1511 /* Setup bootmem... */
1512 pages_avail = 0;
1513 last_valid_pfn = end_pfn = bootmem_init(&pages_avail);
1514
1515 /* Inherit non-locked OBP mappings. */ 1559 /* Inherit non-locked OBP mappings. */
1516 inherit_prom_mappings(); 1560 inherit_prom_mappings();
1517 1561
@@ -1527,13 +1571,16 @@ void __init paging_init(void)
1527 1571
1528 inherit_locked_prom_mappings(1); 1572 inherit_locked_prom_mappings(1);
1529 1573
1530 /* We only created DTLB mapping of this stuff. */
1531 spitfire_flush_dtlb_nucleus_page(alias_base);
1532 if (second_alias_page)
1533 spitfire_flush_dtlb_nucleus_page(second_alias_page);
1534
1535 __flush_tlb_all(); 1574 __flush_tlb_all();
1536 1575
1576 /* Setup bootmem... */
1577 pages_avail = 0;
1578 last_valid_pfn = end_pfn = bootmem_init(&pages_avail);
1579
1580#ifdef CONFIG_DEBUG_PAGEALLOC
1581 kernel_physical_mapping_init();
1582#endif
1583
1537 { 1584 {
1538 unsigned long zones_size[MAX_NR_ZONES]; 1585 unsigned long zones_size[MAX_NR_ZONES];
1539 unsigned long zholes_size[MAX_NR_ZONES]; 1586 unsigned long zholes_size[MAX_NR_ZONES];
@@ -1554,128 +1601,35 @@ void __init paging_init(void)
1554 device_scan(); 1601 device_scan();
1555} 1602}
1556 1603
1557/* Ok, it seems that the prom can allocate some more memory chunks
1558 * as a side effect of some prom calls we perform during the
1559 * boot sequence. My most likely theory is that it is from the
1560 * prom_set_traptable() call, and OBP is allocating a scratchpad
1561 * for saving client program register state etc.
1562 */
1563static void __init sort_memlist(struct linux_mlist_p1275 *thislist)
1564{
1565 int swapi = 0;
1566 int i, mitr;
1567 unsigned long tmpaddr, tmpsize;
1568 unsigned long lowest;
1569
1570 for (i = 0; thislist[i].theres_more != 0; i++) {
1571 lowest = thislist[i].start_adr;
1572 for (mitr = i+1; thislist[mitr-1].theres_more != 0; mitr++)
1573 if (thislist[mitr].start_adr < lowest) {
1574 lowest = thislist[mitr].start_adr;
1575 swapi = mitr;
1576 }
1577 if (lowest == thislist[i].start_adr)
1578 continue;
1579 tmpaddr = thislist[swapi].start_adr;
1580 tmpsize = thislist[swapi].num_bytes;
1581 for (mitr = swapi; mitr > i; mitr--) {
1582 thislist[mitr].start_adr = thislist[mitr-1].start_adr;
1583 thislist[mitr].num_bytes = thislist[mitr-1].num_bytes;
1584 }
1585 thislist[i].start_adr = tmpaddr;
1586 thislist[i].num_bytes = tmpsize;
1587 }
1588}
1589
1590void __init rescan_sp_banks(void)
1591{
1592 struct linux_prom64_registers memlist[64];
1593 struct linux_mlist_p1275 avail[64], *mlist;
1594 unsigned long bytes, base_paddr;
1595 int num_regs, node = prom_finddevice("/memory");
1596 int i;
1597
1598 num_regs = prom_getproperty(node, "available",
1599 (char *) memlist, sizeof(memlist));
1600 num_regs = (num_regs / sizeof(struct linux_prom64_registers));
1601 for (i = 0; i < num_regs; i++) {
1602 avail[i].start_adr = memlist[i].phys_addr;
1603 avail[i].num_bytes = memlist[i].reg_size;
1604 avail[i].theres_more = &avail[i + 1];
1605 }
1606 avail[i - 1].theres_more = NULL;
1607 sort_memlist(avail);
1608
1609 mlist = &avail[0];
1610 i = 0;
1611 bytes = mlist->num_bytes;
1612 base_paddr = mlist->start_adr;
1613
1614 sp_banks[0].base_addr = base_paddr;
1615 sp_banks[0].num_bytes = bytes;
1616
1617 while (mlist->theres_more != NULL){
1618 i++;
1619 mlist = mlist->theres_more;
1620 bytes = mlist->num_bytes;
1621 if (i >= SPARC_PHYS_BANKS-1) {
1622 printk ("The machine has more banks than "
1623 "this kernel can support\n"
1624 "Increase the SPARC_PHYS_BANKS "
1625 "setting (currently %d)\n",
1626 SPARC_PHYS_BANKS);
1627 i = SPARC_PHYS_BANKS-1;
1628 break;
1629 }
1630
1631 sp_banks[i].base_addr = mlist->start_adr;
1632 sp_banks[i].num_bytes = mlist->num_bytes;
1633 }
1634
1635 i++;
1636 sp_banks[i].base_addr = 0xdeadbeefbeefdeadUL;
1637 sp_banks[i].num_bytes = 0;
1638
1639 for (i = 0; sp_banks[i].num_bytes != 0; i++)
1640 sp_banks[i].num_bytes &= PAGE_MASK;
1641}
1642
1643static void __init taint_real_pages(void) 1604static void __init taint_real_pages(void)
1644{ 1605{
1645 struct sparc_phys_banks saved_sp_banks[SPARC_PHYS_BANKS];
1646 int i; 1606 int i;
1647 1607
1648 for (i = 0; i < SPARC_PHYS_BANKS; i++) { 1608 read_obp_memory("available", &pavail_rescan[0], &pavail_rescan_ents);
1649 saved_sp_banks[i].base_addr =
1650 sp_banks[i].base_addr;
1651 saved_sp_banks[i].num_bytes =
1652 sp_banks[i].num_bytes;
1653 }
1654
1655 rescan_sp_banks();
1656 1609
1657 /* Find changes discovered in the sp_bank rescan and 1610 /* Find changes discovered in the physmem available rescan and
1658 * reserve the lost portions in the bootmem maps. 1611 * reserve the lost portions in the bootmem maps.
1659 */ 1612 */
1660 for (i = 0; saved_sp_banks[i].num_bytes; i++) { 1613 for (i = 0; i < pavail_ents; i++) {
1661 unsigned long old_start, old_end; 1614 unsigned long old_start, old_end;
1662 1615
1663 old_start = saved_sp_banks[i].base_addr; 1616 old_start = pavail[i].phys_addr;
1664 old_end = old_start + 1617 old_end = old_start +
1665 saved_sp_banks[i].num_bytes; 1618 pavail[i].reg_size;
1666 while (old_start < old_end) { 1619 while (old_start < old_end) {
1667 int n; 1620 int n;
1668 1621
1669 for (n = 0; sp_banks[n].num_bytes; n++) { 1622 for (n = 0; n < pavail_rescan_ents; n++) {
1670 unsigned long new_start, new_end; 1623 unsigned long new_start, new_end;
1671 1624
1672 new_start = sp_banks[n].base_addr; 1625 new_start = pavail_rescan[n].phys_addr;
1673 new_end = new_start + sp_banks[n].num_bytes; 1626 new_end = new_start +
1627 pavail_rescan[n].reg_size;
1674 1628
1675 if (new_start <= old_start && 1629 if (new_start <= old_start &&
1676 new_end >= (old_start + PAGE_SIZE)) { 1630 new_end >= (old_start + PAGE_SIZE)) {
1677 set_bit (old_start >> 22, 1631 set_bit(old_start >> 22,
1678 sparc64_valid_addr_bitmap); 1632 sparc64_valid_addr_bitmap);
1679 goto do_next_page; 1633 goto do_next_page;
1680 } 1634 }
1681 } 1635 }
@@ -1695,8 +1649,7 @@ void __init mem_init(void)
1695 1649
1696 i = last_valid_pfn >> ((22 - PAGE_SHIFT) + 6); 1650 i = last_valid_pfn >> ((22 - PAGE_SHIFT) + 6);
1697 i += 1; 1651 i += 1;
1698 sparc64_valid_addr_bitmap = (unsigned long *) 1652 sparc64_valid_addr_bitmap = (unsigned long *) alloc_bootmem(i << 3);
1699 __alloc_bootmem(i << 3, SMP_CACHE_BYTES, bootmap_base);
1700 if (sparc64_valid_addr_bitmap == NULL) { 1653 if (sparc64_valid_addr_bitmap == NULL) {
1701 prom_printf("mem_init: Cannot alloc valid_addr_bitmap.\n"); 1654 prom_printf("mem_init: Cannot alloc valid_addr_bitmap.\n");
1702 prom_halt(); 1655 prom_halt();
@@ -1749,7 +1702,7 @@ void __init mem_init(void)
1749 cheetah_ecache_flush_init(); 1702 cheetah_ecache_flush_init();
1750} 1703}
1751 1704
1752void free_initmem (void) 1705void free_initmem(void)
1753{ 1706{
1754 unsigned long addr, initend; 1707 unsigned long addr, initend;
1755 1708
diff --git a/arch/sparc64/mm/ultra.S b/arch/sparc64/mm/ultra.S
index b2ee9b53227f..058b8126c1a7 100644
--- a/arch/sparc64/mm/ultra.S
+++ b/arch/sparc64/mm/ultra.S
@@ -144,42 +144,29 @@ __flush_icache_page: /* %o0 = phys_page */
144 144
145#define DTAG_MASK 0x3 145#define DTAG_MASK 0x3
146 146
147 /* This routine is Spitfire specific so the hardcoded
148 * D-cache size and line-size are OK.
149 */
147 .align 64 150 .align 64
148 .globl __flush_dcache_page 151 .globl __flush_dcache_page
149__flush_dcache_page: /* %o0=kaddr, %o1=flush_icache */ 152__flush_dcache_page: /* %o0=kaddr, %o1=flush_icache */
150 sethi %uhi(PAGE_OFFSET), %g1 153 sethi %uhi(PAGE_OFFSET), %g1
151 sllx %g1, 32, %g1 154 sllx %g1, 32, %g1
152 sub %o0, %g1, %o0 155 sub %o0, %g1, %o0 ! physical address
153 clr %o4 156 srlx %o0, 11, %o0 ! make D-cache TAG
154 srlx %o0, 11, %o0 157 sethi %hi(1 << 14), %o2 ! D-cache size
155 sethi %hi(1 << 14), %o2 158 sub %o2, (1 << 5), %o2 ! D-cache line size
1561: ldxa [%o4] ASI_DCACHE_TAG, %o3 ! LSU Group 1591: ldxa [%o2] ASI_DCACHE_TAG, %o3 ! load D-cache TAG
157 add %o4, (1 << 5), %o4 ! IEU0 160 andcc %o3, DTAG_MASK, %g0 ! Valid?
158 ldxa [%o4] ASI_DCACHE_TAG, %g1 ! LSU Group 161 be,pn %xcc, 2f ! Nope, branch
159 add %o4, (1 << 5), %o4 ! IEU0 162 andn %o3, DTAG_MASK, %o3 ! Clear valid bits
160 ldxa [%o4] ASI_DCACHE_TAG, %g2 ! LSU Group o3 available 163 cmp %o3, %o0 ! TAG match?
161 add %o4, (1 << 5), %o4 ! IEU0 164 bne,pt %xcc, 2f ! Nope, branch
162 andn %o3, DTAG_MASK, %o3 ! IEU1 165 nop
163 ldxa [%o4] ASI_DCACHE_TAG, %g3 ! LSU Group 166 stxa %g0, [%o2] ASI_DCACHE_TAG ! Invalidate TAG
164 add %o4, (1 << 5), %o4 ! IEU0 167 membar #Sync
165 andn %g1, DTAG_MASK, %g1 ! IEU1 1682: brnz,pt %o2, 1b
166 cmp %o0, %o3 ! IEU1 Group 169 sub %o2, (1 << 5), %o2 ! D-cache line size
167 be,a,pn %xcc, dflush1 ! CTI
168 sub %o4, (4 << 5), %o4 ! IEU0 (Group)
169 cmp %o0, %g1 ! IEU1 Group
170 andn %g2, DTAG_MASK, %g2 ! IEU0
171 be,a,pn %xcc, dflush2 ! CTI
172 sub %o4, (3 << 5), %o4 ! IEU0 (Group)
173 cmp %o0, %g2 ! IEU1 Group
174 andn %g3, DTAG_MASK, %g3 ! IEU0
175 be,a,pn %xcc, dflush3 ! CTI
176 sub %o4, (2 << 5), %o4 ! IEU0 (Group)
177 cmp %o0, %g3 ! IEU1 Group
178 be,a,pn %xcc, dflush4 ! CTI
179 sub %o4, (1 << 5), %o4 ! IEU0
1802: cmp %o4, %o2 ! IEU1 Group
181 bne,pt %xcc, 1b ! CTI
182 nop ! IEU0
183 170
184 /* The I-cache does not snoop local stores so we 171 /* The I-cache does not snoop local stores so we
185 * better flush that too when necessary. 172 * better flush that too when necessary.
@@ -189,48 +176,9 @@ __flush_dcache_page: /* %o0=kaddr, %o1=flush_icache */
189 retl 176 retl
190 nop 177 nop
191 178
192dflush1:stxa %g0, [%o4] ASI_DCACHE_TAG
193 add %o4, (1 << 5), %o4
194dflush2:stxa %g0, [%o4] ASI_DCACHE_TAG
195 add %o4, (1 << 5), %o4
196dflush3:stxa %g0, [%o4] ASI_DCACHE_TAG
197 add %o4, (1 << 5), %o4
198dflush4:stxa %g0, [%o4] ASI_DCACHE_TAG
199 add %o4, (1 << 5), %o4
200 membar #Sync
201 ba,pt %xcc, 2b
202 nop
203#endif /* DCACHE_ALIASING_POSSIBLE */ 179#endif /* DCACHE_ALIASING_POSSIBLE */
204 180
205 .previous .text 181 .previous
206 .align 32
207__prefill_dtlb:
208 rdpr %pstate, %g7
209 wrpr %g7, PSTATE_IE, %pstate
210 mov TLB_TAG_ACCESS, %g1
211 stxa %o5, [%g1] ASI_DMMU
212 stxa %o2, [%g0] ASI_DTLB_DATA_IN
213 flush %g6
214 retl
215 wrpr %g7, %pstate
216__prefill_itlb:
217 rdpr %pstate, %g7
218 wrpr %g7, PSTATE_IE, %pstate
219 mov TLB_TAG_ACCESS, %g1
220 stxa %o5, [%g1] ASI_IMMU
221 stxa %o2, [%g0] ASI_ITLB_DATA_IN
222 flush %g6
223 retl
224 wrpr %g7, %pstate
225
226 .globl __update_mmu_cache
227__update_mmu_cache: /* %o0=hw_context, %o1=address, %o2=pte, %o3=fault_code */
228 srlx %o1, PAGE_SHIFT, %o1
229 andcc %o3, FAULT_CODE_DTLB, %g0
230 sllx %o1, PAGE_SHIFT, %o5
231 bne,pt %xcc, __prefill_dtlb
232 or %o5, %o0, %o5
233 ba,a,pt %xcc, __prefill_itlb
234 182
235 /* Cheetah specific versions, patched at boot time. */ 183 /* Cheetah specific versions, patched at boot time. */
236__cheetah_flush_tlb_mm: /* 18 insns */ 184__cheetah_flush_tlb_mm: /* 18 insns */
@@ -283,7 +231,7 @@ __cheetah_flush_tlb_pending: /* 26 insns */
283 wrpr %g7, 0x0, %pstate 231 wrpr %g7, 0x0, %pstate
284 232
285#ifdef DCACHE_ALIASING_POSSIBLE 233#ifdef DCACHE_ALIASING_POSSIBLE
286flush_dcpage_cheetah: /* 11 insns */ 234__cheetah_flush_dcache_page: /* 11 insns */
287 sethi %uhi(PAGE_OFFSET), %g1 235 sethi %uhi(PAGE_OFFSET), %g1
288 sllx %g1, 32, %g1 236 sllx %g1, 32, %g1
289 sub %o0, %g1, %o0 237 sub %o0, %g1, %o0
@@ -329,8 +277,8 @@ cheetah_patch_cachetlbops:
329#ifdef DCACHE_ALIASING_POSSIBLE 277#ifdef DCACHE_ALIASING_POSSIBLE
330 sethi %hi(__flush_dcache_page), %o0 278 sethi %hi(__flush_dcache_page), %o0
331 or %o0, %lo(__flush_dcache_page), %o0 279 or %o0, %lo(__flush_dcache_page), %o0
332 sethi %hi(flush_dcpage_cheetah), %o1 280 sethi %hi(__cheetah_flush_dcache_page), %o1
333 or %o1, %lo(flush_dcpage_cheetah), %o1 281 or %o1, %lo(__cheetah_flush_dcache_page), %o1
334 call cheetah_patch_one 282 call cheetah_patch_one
335 mov 11, %o2 283 mov 11, %o2
336#endif /* DCACHE_ALIASING_POSSIBLE */ 284#endif /* DCACHE_ALIASING_POSSIBLE */
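
The __flush_dcache_page rewrite above drops the unrolled four-entry tag compare in favour of a plain backward walk over every D-cache tag, invalidating lines whose tag matches the page being flushed. Below is a rough C model of that walk as a user-space simulation: the 16K size, 32-byte line and DTAG_MASK follow the constants in the assembly, while the tag array and example values are invented.

#include <stdint.h>
#include <stdio.h>

#define DCACHE_SIZE (1 << 14)   /* 16K, as in the assembly   */
#define LINE_SIZE   (1 << 5)    /* 32-byte lines             */
#define DTAG_MASK   0x3UL       /* low bits flag a valid tag */

static uint64_t dcache_tag[DCACHE_SIZE / LINE_SIZE];

/* Walk every tag from the top of the cache down to offset 0, invalidating
 * lines whose tag matches the page's physical address >> 11.
 */
static void flush_dcache_page(uint64_t paddr)
{
    uint64_t want = paddr >> 11;
    long off;

    for (off = DCACHE_SIZE - LINE_SIZE; off >= 0; off -= LINE_SIZE) {
        uint64_t tag = dcache_tag[off / LINE_SIZE];

        if ((tag & DTAG_MASK) &&              /* valid?     */
            (tag & ~DTAG_MASK) == want)       /* tag match? */
            dcache_tag[off / LINE_SIZE] = 0;  /* invalidate */
    }
}

int main(void)
{
    /* Pretend line 3 holds data from the page at physical 0x4000. */
    dcache_tag[3] = ((0x4000UL >> 11) & ~DTAG_MASK) | 0x1;
    flush_dcache_page(0x4000UL);
    printf("tag[3] after flush = %#lx\n", (unsigned long)dcache_tag[3]);
    return 0;
}
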
diff --git a/arch/sparc64/prom/Makefile b/arch/sparc64/prom/Makefile
index 8f2420d9e9e6..3d33ed27bc27 100644
--- a/arch/sparc64/prom/Makefile
+++ b/arch/sparc64/prom/Makefile
@@ -6,5 +6,5 @@
6EXTRA_AFLAGS := -ansi 6EXTRA_AFLAGS := -ansi
7EXTRA_CFLAGS := -Werror 7EXTRA_CFLAGS := -Werror
8 8
9lib-y := bootstr.o devops.o init.o memory.o misc.o \ 9lib-y := bootstr.o devops.o init.o misc.o \
10 tree.o console.o printf.o p1275.o map.o cif.o 10 tree.o console.o printf.o p1275.o cif.o
diff --git a/arch/sparc64/prom/console.c b/arch/sparc64/prom/console.c
index 028a53fcb1ec..eae5db8dda56 100644
--- a/arch/sparc64/prom/console.c
+++ b/arch/sparc64/prom/console.c
@@ -67,7 +67,7 @@ prom_putchar(char c)
67} 67}
68 68
69void 69void
70prom_puts(char *s, int len) 70prom_puts(const char *s, int len)
71{ 71{
72 p1275_cmd("write", P1275_ARG(1,P1275_ARG_IN_BUF)| 72 p1275_cmd("write", P1275_ARG(1,P1275_ARG_IN_BUF)|
73 P1275_INOUT(3,1), 73 P1275_INOUT(3,1),
diff --git a/arch/sparc64/prom/devops.c b/arch/sparc64/prom/devops.c
index 2c99b21b6981..4641839eb39a 100644
--- a/arch/sparc64/prom/devops.c
+++ b/arch/sparc64/prom/devops.c
@@ -16,7 +16,7 @@
16 * Returns 0 on failure. 16 * Returns 0 on failure.
17 */ 17 */
18int 18int
19prom_devopen(char *dstr) 19prom_devopen(const char *dstr)
20{ 20{
21 return p1275_cmd ("open", P1275_ARG(0,P1275_ARG_IN_STRING)| 21 return p1275_cmd ("open", P1275_ARG(0,P1275_ARG_IN_STRING)|
22 P1275_INOUT(1,1), 22 P1275_INOUT(1,1),
diff --git a/arch/sparc64/prom/init.c b/arch/sparc64/prom/init.c
index 817faae058cd..f3cc2d8578b2 100644
--- a/arch/sparc64/prom/init.c
+++ b/arch/sparc64/prom/init.c
@@ -27,7 +27,6 @@ int prom_chosen_node;
27 * failure. It gets passed the pointer to the PROM vector. 27 * failure. It gets passed the pointer to the PROM vector.
28 */ 28 */
29 29
30extern void prom_meminit(void);
31extern void prom_cif_init(void *, void *); 30extern void prom_cif_init(void *, void *);
32 31
33void __init prom_init(void *cif_handler, void *cif_stack) 32void __init prom_init(void *cif_handler, void *cif_stack)
@@ -46,7 +45,7 @@ void __init prom_init(void *cif_handler, void *cif_stack)
46 if((prom_root_node == 0) || (prom_root_node == -1)) 45 if((prom_root_node == 0) || (prom_root_node == -1))
47 prom_halt(); 46 prom_halt();
48 47
49 prom_chosen_node = prom_finddevice("/chosen"); 48 prom_chosen_node = prom_finddevice(prom_chosen_path);
50 if (!prom_chosen_node || prom_chosen_node == -1) 49 if (!prom_chosen_node || prom_chosen_node == -1)
51 prom_halt(); 50 prom_halt();
52 51
@@ -90,8 +89,6 @@ void __init prom_init(void *cif_handler, void *cif_stack)
90 89
91 printk ("PROMLIB: Sun IEEE Boot Prom %s\n", buffer + bufadjust); 90 printk ("PROMLIB: Sun IEEE Boot Prom %s\n", buffer + bufadjust);
92 91
93 prom_meminit();
94
95 /* Initialization successful. */ 92 /* Initialization successful. */
96 return; 93 return;
97 94
diff --git a/arch/sparc64/prom/map.S b/arch/sparc64/prom/map.S
deleted file mode 100644
index 21b3f9c99ea7..000000000000
--- a/arch/sparc64/prom/map.S
+++ /dev/null
@@ -1,72 +0,0 @@
1/* $Id: map.S,v 1.2 1999/11/19 05:53:02 davem Exp $
2 * map.S: Tricky coding required to fixup the kernel OBP maps
3 * properly.
4 *
5 * Copyright (C) 1999 David S. Miller (davem@redhat.com)
6 */
7
8 .text
9 .align 8192
10 .globl prom_boot_page
11prom_boot_page:
12call_method:
13 .asciz "call-method"
14 .align 8
15map:
16 .asciz "map"
17 .align 8
18
19 /* When we are invoked, our caller has remapped us to
20 * page zero, therefore we must use PC relative addressing
21 * for everything after we begin performing the unmap/map
22 * calls.
23 */
24 .globl prom_remap
25prom_remap: /* %o0 = physpage, %o1 = virtpage, %o2 = mmu_ihandle */
26 rd %pc, %g1
27 srl %o2, 0, %o2 ! kill sign extension
28 sethi %hi(p1275buf), %g2
29 or %g2, %lo(p1275buf), %g2
30 ldx [%g2 + 0x10], %g3 ! prom_cif_stack
31 save %g3, -(192 + 128), %sp
32 ldx [%g2 + 0x08], %l0 ! prom_cif_handler
33 mov %g6, %i3
34 mov %g4, %i4
35 mov %g5, %i5
36 flushw
37
38 sethi %hi(prom_remap - call_method), %g7
39 or %g7, %lo(prom_remap - call_method), %g7
40 sub %g1, %g7, %l2 ! call-method string
41 sethi %hi(prom_remap - map), %g7
42 or %g7, %lo(prom_remap - map), %g7
43 sub %g1, %g7, %l4 ! map string
44
45 /* OK, map the 4MB region we really live at. */
46 stx %l2, [%sp + 2047 + 128 + 0x00] ! call-method
47 mov 7, %l5
48 stx %l5, [%sp + 2047 + 128 + 0x08] ! num_args
49 mov 1, %l5
50 stx %l5, [%sp + 2047 + 128 + 0x10] ! num_rets
51 stx %l4, [%sp + 2047 + 128 + 0x18] ! map
52 stx %i2, [%sp + 2047 + 128 + 0x20] ! mmu_ihandle
53 mov -1, %l5
54 stx %l5, [%sp + 2047 + 128 + 0x28] ! mode == default
55 sethi %hi(4 * 1024 * 1024), %l5
56 stx %l5, [%sp + 2047 + 128 + 0x30] ! size
57 stx %i1, [%sp + 2047 + 128 + 0x38] ! vaddr
58 stx %g0, [%sp + 2047 + 128 + 0x40] ! filler
59 stx %i0, [%sp + 2047 + 128 + 0x48] ! paddr
60 call %l0
61 add %sp, (2047 + 128), %o0 ! argument array
62
63 /* Restore hard-coded globals. */
64 mov %i3, %g6
65 mov %i4, %g4
66 mov %i5, %g5
67
68 /* Wheee.... we are done. */
69 ret
70 restore
71
72 .align 8192
diff --git a/arch/sparc64/prom/memory.c b/arch/sparc64/prom/memory.c
deleted file mode 100644
index f4a8143e052c..000000000000
--- a/arch/sparc64/prom/memory.c
+++ /dev/null
@@ -1,152 +0,0 @@
1/* $Id: memory.c,v 1.5 1999/08/31 06:55:04 davem Exp $
2 * memory.c: Prom routine for acquiring various bits of information
3 * about RAM on the machine, both virtual and physical.
4 *
5 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
6 * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
7 */
8
9#include <linux/kernel.h>
10#include <linux/init.h>
11
12#include <asm/openprom.h>
13#include <asm/oplib.h>
14
15/* This routine, for consistency, returns the ram parameters in the
16 * V0 prom memory descriptor format. I choose this format because I
17 * think it was the easiest to work with. I feel the religious
18 * arguments now... ;) Also, I return the linked lists sorted to
19 * prevent paging_init() upset stomach as I have not yet written
20 * the pepto-bismol kernel module yet.
21 */
22
23struct linux_prom64_registers prom_reg_memlist[64];
24struct linux_prom64_registers prom_reg_tmp[64];
25
26struct linux_mlist_p1275 prom_phys_total[64];
27struct linux_mlist_p1275 prom_prom_taken[64];
28struct linux_mlist_p1275 prom_phys_avail[64];
29
30struct linux_mlist_p1275 *prom_ptot_ptr = prom_phys_total;
31struct linux_mlist_p1275 *prom_ptak_ptr = prom_prom_taken;
32struct linux_mlist_p1275 *prom_pavl_ptr = prom_phys_avail;
33
34struct linux_mem_p1275 prom_memlist;
35
36
37/* Internal Prom library routine to sort a linux_mlist_p1275 memory
38 * list. Used below in initialization.
39 */
40static void __init
41prom_sortmemlist(struct linux_mlist_p1275 *thislist)
42{
43 int swapi = 0;
44 int i, mitr;
45 unsigned long tmpaddr, tmpsize;
46 unsigned long lowest;
47
48 for(i=0; thislist[i].theres_more; i++) {
49 lowest = thislist[i].start_adr;
50 for(mitr = i+1; thislist[mitr-1].theres_more; mitr++)
51 if(thislist[mitr].start_adr < lowest) {
52 lowest = thislist[mitr].start_adr;
53 swapi = mitr;
54 }
55 if(lowest == thislist[i].start_adr) continue;
56 tmpaddr = thislist[swapi].start_adr;
57 tmpsize = thislist[swapi].num_bytes;
58 for(mitr = swapi; mitr > i; mitr--) {
59 thislist[mitr].start_adr = thislist[mitr-1].start_adr;
60 thislist[mitr].num_bytes = thislist[mitr-1].num_bytes;
61 }
62 thislist[i].start_adr = tmpaddr;
63 thislist[i].num_bytes = tmpsize;
64 }
65}
66
67/* Initialize the memory lists based upon the prom version. */
68void __init prom_meminit(void)
69{
70 int node = 0;
71 unsigned int iter, num_regs;
72
73 node = prom_finddevice("/memory");
74 num_regs = prom_getproperty(node, "available",
75 (char *) prom_reg_memlist,
76 sizeof(prom_reg_memlist));
77 num_regs = (num_regs/sizeof(struct linux_prom64_registers));
78 for(iter=0; iter<num_regs; iter++) {
79 prom_phys_avail[iter].start_adr =
80 prom_reg_memlist[iter].phys_addr;
81 prom_phys_avail[iter].num_bytes =
82 prom_reg_memlist[iter].reg_size;
83 prom_phys_avail[iter].theres_more =
84 &prom_phys_avail[iter+1];
85 }
86 prom_phys_avail[iter-1].theres_more = NULL;
87
88 num_regs = prom_getproperty(node, "reg",
89 (char *) prom_reg_memlist,
90 sizeof(prom_reg_memlist));
91 num_regs = (num_regs/sizeof(struct linux_prom64_registers));
92 for(iter=0; iter<num_regs; iter++) {
93 prom_phys_total[iter].start_adr =
94 prom_reg_memlist[iter].phys_addr;
95 prom_phys_total[iter].num_bytes =
96 prom_reg_memlist[iter].reg_size;
97 prom_phys_total[iter].theres_more =
98 &prom_phys_total[iter+1];
99 }
100 prom_phys_total[iter-1].theres_more = NULL;
101
102 node = prom_finddevice("/virtual-memory");
103 num_regs = prom_getproperty(node, "available",
104 (char *) prom_reg_memlist,
105 sizeof(prom_reg_memlist));
106 num_regs = (num_regs/sizeof(struct linux_prom64_registers));
107
108 /* Convert available virtual areas to taken virtual
109 * areas. First sort, then convert.
110 */
111 for(iter=0; iter<num_regs; iter++) {
112 prom_prom_taken[iter].start_adr =
113 prom_reg_memlist[iter].phys_addr;
114 prom_prom_taken[iter].num_bytes =
115 prom_reg_memlist[iter].reg_size;
116 prom_prom_taken[iter].theres_more =
117 &prom_prom_taken[iter+1];
118 }
119 prom_prom_taken[iter-1].theres_more = NULL;
120
121 prom_sortmemlist(prom_prom_taken);
122
123 /* Finally, convert. */
124 for(iter=0; iter<num_regs; iter++) {
125 prom_prom_taken[iter].start_adr =
126 prom_prom_taken[iter].start_adr +
127 prom_prom_taken[iter].num_bytes;
128 prom_prom_taken[iter].num_bytes =
129 prom_prom_taken[iter+1].start_adr -
130 prom_prom_taken[iter].start_adr;
131 }
132 prom_prom_taken[iter-1].num_bytes =
133 -1UL - prom_prom_taken[iter-1].start_adr;
134
135 /* Sort the other two lists. */
136 prom_sortmemlist(prom_phys_total);
137 prom_sortmemlist(prom_phys_avail);
138
139 /* Link all the lists into the top-level descriptor. */
140 prom_memlist.p1275_totphys=&prom_ptot_ptr;
141 prom_memlist.p1275_prommap=&prom_ptak_ptr;
142 prom_memlist.p1275_available=&prom_pavl_ptr;
143}
144
145/* This returns a pointer to our libraries internal p1275 format
146 * memory descriptor.
147 */
148struct linux_mem_p1275 *
149prom_meminfo(void)
150{
151 return &prom_memlist;
152}
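
The deleted prom/memory.c above keeps its PROM memory lists ordered by start address with a hand-rolled sort (prom_sortmemlist). For a flat array the same ordering can be had with qsort(); this is a minimal user-space sketch of that idea, not the original list-based algorithm, and the range values are made up.

#include <stdio.h>
#include <stdlib.h>

struct mem_range {
    unsigned long start_adr;
    unsigned long num_bytes;
};

/* Order ranges by ascending start address. */
static int cmp_start(const void *a, const void *b)
{
    const struct mem_range *x = a, *y = b;

    if (x->start_adr < y->start_adr)
        return -1;
    return x->start_adr > y->start_adr;
}

int main(void)
{
    struct mem_range list[] = {
        { 0x10000000UL, 0x1000UL },
        { 0x00000000UL, 0x2000UL },
        { 0x08000000UL, 0x4000UL },
    };
    size_t i, n = sizeof(list) / sizeof(list[0]);

    qsort(list, n, sizeof(list[0]), cmp_start);
    for (i = 0; i < n; i++)
        printf("%#lx + %#lx\n", list[i].start_adr, list[i].num_bytes);
    return 0;
}
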
diff --git a/arch/sparc64/prom/misc.c b/arch/sparc64/prom/misc.c
index 19c44e97e9ee..9b895faf077b 100644
--- a/arch/sparc64/prom/misc.c
+++ b/arch/sparc64/prom/misc.c
@@ -17,14 +17,14 @@
17#include <asm/system.h> 17#include <asm/system.h>
18 18
19/* Reset and reboot the machine with the command 'bcommand'. */ 19/* Reset and reboot the machine with the command 'bcommand'. */
20void prom_reboot(char *bcommand) 20void prom_reboot(const char *bcommand)
21{ 21{
22 p1275_cmd("boot", P1275_ARG(0, P1275_ARG_IN_STRING) | 22 p1275_cmd("boot", P1275_ARG(0, P1275_ARG_IN_STRING) |
23 P1275_INOUT(1, 0), bcommand); 23 P1275_INOUT(1, 0), bcommand);
24} 24}
25 25
26/* Forth evaluate the expression contained in 'fstring'. */ 26/* Forth evaluate the expression contained in 'fstring'. */
27void prom_feval(char *fstring) 27void prom_feval(const char *fstring)
28{ 28{
29 if (!fstring || fstring[0] == 0) 29 if (!fstring || fstring[0] == 0)
30 return; 30 return;
@@ -148,21 +148,19 @@ void prom_set_trap_table(unsigned long tba)
148 p1275_cmd("SUNW,set-trap-table", P1275_INOUT(1, 0), tba); 148 p1275_cmd("SUNW,set-trap-table", P1275_INOUT(1, 0), tba);
149} 149}
150 150
151int mmu_ihandle_cache = 0;
152
153int prom_get_mmu_ihandle(void) 151int prom_get_mmu_ihandle(void)
154{ 152{
155 int node, ret; 153 int node, ret;
156 154
157 if (mmu_ihandle_cache != 0) 155 if (prom_mmu_ihandle_cache != 0)
158 return mmu_ihandle_cache; 156 return prom_mmu_ihandle_cache;
159 157
160 node = prom_finddevice("/chosen"); 158 node = prom_finddevice(prom_chosen_path);
161 ret = prom_getint(node, "mmu"); 159 ret = prom_getint(node, prom_mmu_name);
162 if (ret == -1 || ret == 0) 160 if (ret == -1 || ret == 0)
163 mmu_ihandle_cache = -1; 161 prom_mmu_ihandle_cache = -1;
164 else 162 else
165 mmu_ihandle_cache = ret; 163 prom_mmu_ihandle_cache = ret;
166 164
167 return ret; 165 return ret;
168} 166}
@@ -190,7 +188,7 @@ long prom_itlb_load(unsigned long index,
190 unsigned long tte_data, 188 unsigned long tte_data,
191 unsigned long vaddr) 189 unsigned long vaddr)
192{ 190{
193 return p1275_cmd("call-method", 191 return p1275_cmd(prom_callmethod_name,
194 (P1275_ARG(0, P1275_ARG_IN_STRING) | 192 (P1275_ARG(0, P1275_ARG_IN_STRING) |
195 P1275_ARG(2, P1275_ARG_IN_64B) | 193 P1275_ARG(2, P1275_ARG_IN_64B) |
196 P1275_ARG(3, P1275_ARG_IN_64B) | 194 P1275_ARG(3, P1275_ARG_IN_64B) |
@@ -207,7 +205,7 @@ long prom_dtlb_load(unsigned long index,
207 unsigned long tte_data, 205 unsigned long tte_data,
208 unsigned long vaddr) 206 unsigned long vaddr)
209{ 207{
210 return p1275_cmd("call-method", 208 return p1275_cmd(prom_callmethod_name,
211 (P1275_ARG(0, P1275_ARG_IN_STRING) | 209 (P1275_ARG(0, P1275_ARG_IN_STRING) |
212 P1275_ARG(2, P1275_ARG_IN_64B) | 210 P1275_ARG(2, P1275_ARG_IN_64B) |
213 P1275_ARG(3, P1275_ARG_IN_64B) | 211 P1275_ARG(3, P1275_ARG_IN_64B) |
@@ -223,13 +221,13 @@ long prom_dtlb_load(unsigned long index,
223int prom_map(int mode, unsigned long size, 221int prom_map(int mode, unsigned long size,
224 unsigned long vaddr, unsigned long paddr) 222 unsigned long vaddr, unsigned long paddr)
225{ 223{
226 int ret = p1275_cmd("call-method", 224 int ret = p1275_cmd(prom_callmethod_name,
227 (P1275_ARG(0, P1275_ARG_IN_STRING) | 225 (P1275_ARG(0, P1275_ARG_IN_STRING) |
228 P1275_ARG(3, P1275_ARG_IN_64B) | 226 P1275_ARG(3, P1275_ARG_IN_64B) |
229 P1275_ARG(4, P1275_ARG_IN_64B) | 227 P1275_ARG(4, P1275_ARG_IN_64B) |
230 P1275_ARG(6, P1275_ARG_IN_64B) | 228 P1275_ARG(6, P1275_ARG_IN_64B) |
231 P1275_INOUT(7, 1)), 229 P1275_INOUT(7, 1)),
232 "map", 230 prom_map_name,
233 prom_get_mmu_ihandle(), 231 prom_get_mmu_ihandle(),
234 mode, 232 mode,
235 size, 233 size,
@@ -244,12 +242,12 @@ int prom_map(int mode, unsigned long size,
244 242
245void prom_unmap(unsigned long size, unsigned long vaddr) 243void prom_unmap(unsigned long size, unsigned long vaddr)
246{ 244{
247 p1275_cmd("call-method", 245 p1275_cmd(prom_callmethod_name,
248 (P1275_ARG(0, P1275_ARG_IN_STRING) | 246 (P1275_ARG(0, P1275_ARG_IN_STRING) |
249 P1275_ARG(2, P1275_ARG_IN_64B) | 247 P1275_ARG(2, P1275_ARG_IN_64B) |
250 P1275_ARG(3, P1275_ARG_IN_64B) | 248 P1275_ARG(3, P1275_ARG_IN_64B) |
251 P1275_INOUT(4, 0)), 249 P1275_INOUT(4, 0)),
252 "unmap", 250 prom_unmap_name,
253 prom_get_mmu_ihandle(), 251 prom_get_mmu_ihandle(),
254 size, 252 size,
255 vaddr); 253 vaddr);
@@ -258,7 +256,7 @@ void prom_unmap(unsigned long size, unsigned long vaddr)
258/* Set aside physical memory which is not touched or modified 256/* Set aside physical memory which is not touched or modified
259 * across soft resets. 257 * across soft resets.
260 */ 258 */
261unsigned long prom_retain(char *name, 259unsigned long prom_retain(const char *name,
262 unsigned long pa_low, unsigned long pa_high, 260 unsigned long pa_low, unsigned long pa_high,
263 long size, long align) 261 long size, long align)
264{ 262{
@@ -290,7 +288,7 @@ int prom_getunumber(int syndrome_code,
290 unsigned long phys_addr, 288 unsigned long phys_addr,
291 char *buf, int buflen) 289 char *buf, int buflen)
292{ 290{
293 return p1275_cmd("call-method", 291 return p1275_cmd(prom_callmethod_name,
294 (P1275_ARG(0, P1275_ARG_IN_STRING) | 292 (P1275_ARG(0, P1275_ARG_IN_STRING) |
295 P1275_ARG(3, P1275_ARG_OUT_BUF) | 293 P1275_ARG(3, P1275_ARG_OUT_BUF) |
296 P1275_ARG(6, P1275_ARG_IN_64B) | 294 P1275_ARG(6, P1275_ARG_IN_64B) |
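
prom_get_mmu_ihandle() above caches the handle it looks up from the firmware, with -1 doubling as a "lookup failed, do not retry" marker. A small user-space sketch of that idiom follows; expensive_lookup() is a stand-in for the real prom_finddevice()/prom_getint() calls and its return value is invented.

#include <stdio.h>

static int handle_cache;            /* 0 = not looked up yet */

static int expensive_lookup(void)
{
    puts("performing firmware lookup");
    return 42;                      /* pretend the lookup succeeded */
}

static int get_handle(void)
{
    int ret;

    if (handle_cache != 0)
        return handle_cache;

    ret = expensive_lookup();
    handle_cache = (ret == -1 || ret == 0) ? -1 : ret;
    return ret;
}

int main(void)
{
    printf("first:  %d\n", get_handle());
    printf("second: %d\n", get_handle());   /* served from the cache */
    return 0;
}
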
diff --git a/arch/sparc64/prom/p1275.c b/arch/sparc64/prom/p1275.c
index 59fe38bba39e..a5a7c5712028 100644
--- a/arch/sparc64/prom/p1275.c
+++ b/arch/sparc64/prom/p1275.c
@@ -46,7 +46,7 @@ static inline unsigned long spitfire_get_primary_context(void)
46 */ 46 */
47DEFINE_SPINLOCK(prom_entry_lock); 47DEFINE_SPINLOCK(prom_entry_lock);
48 48
49long p1275_cmd (char *service, long fmt, ...) 49long p1275_cmd(const char *service, long fmt, ...)
50{ 50{
51 char *p, *q; 51 char *p, *q;
52 unsigned long flags; 52 unsigned long flags;
diff --git a/arch/sparc64/prom/printf.c b/arch/sparc64/prom/printf.c
index a6df82cafa0d..660943ee4c2a 100644
--- a/arch/sparc64/prom/printf.c
+++ b/arch/sparc64/prom/printf.c
@@ -34,7 +34,7 @@ prom_write(const char *buf, unsigned int n)
34} 34}
35 35
36void 36void
37prom_printf(char *fmt, ...) 37prom_printf(const char *fmt, ...)
38{ 38{
39 va_list args; 39 va_list args;
40 int i; 40 int i;
diff --git a/arch/sparc64/prom/tree.c b/arch/sparc64/prom/tree.c
index ccf73258ebf7..b1ff9e87dcc6 100644
--- a/arch/sparc64/prom/tree.c
+++ b/arch/sparc64/prom/tree.c
@@ -69,7 +69,7 @@ prom_getsibling(int node)
69 * Return -1 on error. 69 * Return -1 on error.
70 */ 70 */
71__inline__ int 71__inline__ int
72prom_getproplen(int node, char *prop) 72prom_getproplen(int node, const char *prop)
73{ 73{
74 if((!node) || (!prop)) return -1; 74 if((!node) || (!prop)) return -1;
75 return p1275_cmd ("getproplen", 75 return p1275_cmd ("getproplen",
@@ -83,20 +83,20 @@ prom_getproplen(int node, char *prop)
83 * was successful the length will be returned, else -1 is returned. 83 * was successful the length will be returned, else -1 is returned.
84 */ 84 */
85__inline__ int 85__inline__ int
86prom_getproperty(int node, char *prop, char *buffer, int bufsize) 86prom_getproperty(int node, const char *prop, char *buffer, int bufsize)
87{ 87{
88 int plen; 88 int plen;
89 89
90 plen = prom_getproplen(node, prop); 90 plen = prom_getproplen(node, prop);
91 if((plen > bufsize) || (plen == 0) || (plen == -1)) 91 if ((plen > bufsize) || (plen == 0) || (plen == -1)) {
92 return -1; 92 return -1;
93 else { 93 } else {
94 /* Ok, things seem all right. */ 94 /* Ok, things seem all right. */
95 return p1275_cmd ("getprop", 95 return p1275_cmd(prom_getprop_name,
96 P1275_ARG(1,P1275_ARG_IN_STRING)| 96 P1275_ARG(1,P1275_ARG_IN_STRING)|
97 P1275_ARG(2,P1275_ARG_OUT_BUF)| 97 P1275_ARG(2,P1275_ARG_OUT_BUF)|
98 P1275_INOUT(4, 1), 98 P1275_INOUT(4, 1),
99 node, prop, buffer, P1275_SIZE(plen)); 99 node, prop, buffer, P1275_SIZE(plen));
100 } 100 }
101} 101}
102 102
@@ -104,7 +104,7 @@ prom_getproperty(int node, char *prop, char *buffer, int bufsize)
104 * on failure. 104 * on failure.
105 */ 105 */
106__inline__ int 106__inline__ int
107prom_getint(int node, char *prop) 107prom_getint(int node, const char *prop)
108{ 108{
109 int intprop; 109 int intprop;
110 110
@@ -119,7 +119,7 @@ prom_getint(int node, char *prop)
119 */ 119 */
120 120
121int 121int
122prom_getintdefault(int node, char *property, int deflt) 122prom_getintdefault(int node, const char *property, int deflt)
123{ 123{
124 int retval; 124 int retval;
125 125
@@ -131,7 +131,7 @@ prom_getintdefault(int node, char *property, int deflt)
131 131
132/* Acquire a boolean property, 1=TRUE 0=FALSE. */ 132/* Acquire a boolean property, 1=TRUE 0=FALSE. */
133int 133int
134prom_getbool(int node, char *prop) 134prom_getbool(int node, const char *prop)
135{ 135{
136 int retval; 136 int retval;
137 137
@@ -145,7 +145,7 @@ prom_getbool(int node, char *prop)
145 * buffer. 145 * buffer.
146 */ 146 */
147void 147void
148prom_getstring(int node, char *prop, char *user_buf, int ubuf_size) 148prom_getstring(int node, const char *prop, char *user_buf, int ubuf_size)
149{ 149{
150 int len; 150 int len;
151 151
@@ -160,7 +160,7 @@ prom_getstring(int node, char *prop, char *user_buf, int ubuf_size)
160 * YES = 1 NO = 0 160 * YES = 1 NO = 0
161 */ 161 */
162int 162int
163prom_nodematch(int node, char *name) 163prom_nodematch(int node, const char *name)
164{ 164{
165 char namebuf[128]; 165 char namebuf[128];
166 prom_getproperty(node, "name", namebuf, sizeof(namebuf)); 166 prom_getproperty(node, "name", namebuf, sizeof(namebuf));
@@ -172,7 +172,7 @@ prom_nodematch(int node, char *name)
172 * 'nodename'. Return node if successful, zero if not. 172 * 'nodename'. Return node if successful, zero if not.
173 */ 173 */
174int 174int
175prom_searchsiblings(int node_start, char *nodename) 175prom_searchsiblings(int node_start, const char *nodename)
176{ 176{
177 177
178 int thisnode, error; 178 int thisnode, error;
@@ -294,7 +294,7 @@ prom_firstprop(int node, char *buffer)
294 * property types for this node. 294 * property types for this node.
295 */ 295 */
296__inline__ char * 296__inline__ char *
297prom_nextprop(int node, char *oprop, char *buffer) 297prom_nextprop(int node, const char *oprop, char *buffer)
298{ 298{
299 char buf[32]; 299 char buf[32];
300 300
@@ -314,15 +314,17 @@ prom_nextprop(int node, char *oprop, char *buffer)
314} 314}
315 315
316int 316int
317prom_finddevice(char *name) 317prom_finddevice(const char *name)
318{ 318{
319 if(!name) return 0; 319 if (!name)
320 return p1275_cmd ("finddevice", P1275_ARG(0,P1275_ARG_IN_STRING)| 320 return 0;
321 P1275_INOUT(1, 1), 321 return p1275_cmd(prom_finddev_name,
322 name); 322 P1275_ARG(0,P1275_ARG_IN_STRING)|
323 P1275_INOUT(1, 1),
324 name);
323} 325}
324 326
325int prom_node_has_property(int node, char *prop) 327int prom_node_has_property(int node, const char *prop)
326{ 328{
327 char buf [32]; 329 char buf [32];
328 330
@@ -339,7 +341,7 @@ int prom_node_has_property(int node, char *prop)
339 * of 'size' bytes. Return the number of bytes the prom accepted. 341 * of 'size' bytes. Return the number of bytes the prom accepted.
340 */ 342 */
341int 343int
342prom_setprop(int node, char *pname, char *value, int size) 344prom_setprop(int node, const char *pname, char *value, int size)
343{ 345{
344 if(size == 0) return 0; 346 if(size == 0) return 0;
345 if((pname == 0) || (value == 0)) return 0; 347 if((pname == 0) || (value == 0)) return 0;
@@ -364,7 +366,7 @@ prom_inst2pkg(int inst)
364 * FIXME: Should work for v0 as well 366 * FIXME: Should work for v0 as well
365 */ 367 */
366int 368int
367prom_pathtoinode(char *path) 369prom_pathtoinode(const char *path)
368{ 370{
369 int node, inst; 371 int node, inst;
370 372
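
prom_getproperty() above asks the firmware for the property length first, refuses anything that will not fit the caller's buffer, and only then fetches the data. The sketch below shows the same shape in user space; fake_proplen() and fake_getprop() are invented stand-ins for the real p1275_cmd() calls, and the property value is made up.

#include <stdio.h>
#include <string.h>

static const char fake_value[] = "SUNW,Ultra-5_10";

/* Stand-ins for the firmware calls behind getproplen/getprop. */
static int fake_proplen(const char *prop)
{
    return strcmp(prop, "name") == 0 ? (int)sizeof(fake_value) : -1;
}

static int fake_getprop(const char *prop, char *buf, int len)
{
    (void)prop;
    memcpy(buf, fake_value, len);
    return len;
}

/* Length-check first, fetch second. */
static int getproperty(const char *prop, char *buffer, int bufsize)
{
    int plen = fake_proplen(prop);

    if (plen > bufsize || plen == 0 || plen == -1)
        return -1;
    return fake_getprop(prop, buffer, plen);
}

int main(void)
{
    char buf[32];

    if (getproperty("name", buf, sizeof(buf)) > 0)
        printf("name = %s\n", buf);
    return 0;
}
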
diff --git a/arch/um/Makefile b/arch/um/Makefile
index 5b5af95721ab..7af37e342e33 100644
--- a/arch/um/Makefile
+++ b/arch/um/Makefile
@@ -28,8 +28,6 @@ SYMLINK_HEADERS := $(foreach header,$(SYMLINK_HEADERS),include/asm-um/$(header))
28ARCH_SYMLINKS = include/asm-um/arch $(ARCH_DIR)/include/sysdep $(ARCH_DIR)/os \ 28ARCH_SYMLINKS = include/asm-um/arch $(ARCH_DIR)/include/sysdep $(ARCH_DIR)/os \
29 $(SYMLINK_HEADERS) $(ARCH_DIR)/include/uml-config.h 29 $(SYMLINK_HEADERS) $(ARCH_DIR)/include/uml-config.h
30 30
31GEN_HEADERS += $(ARCH_DIR)/include/task.h $(ARCH_DIR)/include/kern_constants.h
32
33um-modes-$(CONFIG_MODE_TT) += tt 31um-modes-$(CONFIG_MODE_TT) += tt
34um-modes-$(CONFIG_MODE_SKAS) += skas 32um-modes-$(CONFIG_MODE_SKAS) += skas
35 33
@@ -45,9 +43,7 @@ endif
45 43
46ARCH_INCLUDE := -I$(ARCH_DIR)/include 44ARCH_INCLUDE := -I$(ARCH_DIR)/include
47ifneq ($(KBUILD_SRC),) 45ifneq ($(KBUILD_SRC),)
48ARCH_INCLUDE += -I$(ARCH_DIR)/include2
49ARCH_INCLUDE += -I$(srctree)/$(ARCH_DIR)/include 46ARCH_INCLUDE += -I$(srctree)/$(ARCH_DIR)/include
50MRPROPER_DIRS += $(ARCH_DIR)/include2
51endif 47endif
52SYS_DIR := $(ARCH_DIR)/include/sysdep-$(SUBARCH) 48SYS_DIR := $(ARCH_DIR)/include/sysdep-$(SUBARCH)
53 49
@@ -87,10 +83,6 @@ CONFIG_KERNEL_HALF_GIGS ?= 0
87 83
88SIZE = (($(CONFIG_NEST_LEVEL) + $(CONFIG_KERNEL_HALF_GIGS)) * 0x20000000) 84SIZE = (($(CONFIG_NEST_LEVEL) + $(CONFIG_KERNEL_HALF_GIGS)) * 0x20000000)
89 85
90ifeq ($(CONFIG_MODE_SKAS), y)
91$(SYS_HEADERS) : $(ARCH_DIR)/include/skas_ptregs.h
92endif
93
94.PHONY: linux 86.PHONY: linux
95 87
96all: linux 88all: linux
@@ -111,7 +103,8 @@ else
111$(shell cd $(ARCH_DIR) && ln -sf Kconfig.$(SUBARCH) Kconfig.arch) 103$(shell cd $(ARCH_DIR) && ln -sf Kconfig.$(SUBARCH) Kconfig.arch)
112endif 104endif
113 105
114archprepare: $(ARCH_SYMLINKS) $(SYS_HEADERS) $(GEN_HEADERS) 106archprepare: $(ARCH_SYMLINKS) $(ARCH_DIR)/include/user_constants.h
107prepare: $(ARCH_DIR)/include/kern_constants.h
115 108
116LINK-$(CONFIG_LD_SCRIPT_STATIC) += -static 109LINK-$(CONFIG_LD_SCRIPT_STATIC) += -static
117LINK-$(CONFIG_LD_SCRIPT_DYN) += -Wl,-rpath,/lib 110LINK-$(CONFIG_LD_SCRIPT_DYN) += -Wl,-rpath,/lib
@@ -146,15 +139,13 @@ endef
146#When cleaning we don't include .config, so we don't include 139#When cleaning we don't include .config, so we don't include
147#TT or skas makefiles and don't clean skas_ptregs.h. 140#TT or skas makefiles and don't clean skas_ptregs.h.
148CLEAN_FILES += linux x.i gmon.out $(ARCH_DIR)/include/uml-config.h \ 141CLEAN_FILES += linux x.i gmon.out $(ARCH_DIR)/include/uml-config.h \
149 $(GEN_HEADERS) $(ARCH_DIR)/include/skas_ptregs.h \ 142 $(ARCH_DIR)/include/user_constants.h \
150 $(ARCH_DIR)/include/user_constants.h $(ARCH_DIR)/Kconfig.arch 143 $(ARCH_DIR)/include/kern_constants.h $(ARCH_DIR)/Kconfig.arch
151 144
152MRPROPER_FILES += $(SYMLINK_HEADERS) $(ARCH_SYMLINKS) \ 145MRPROPER_FILES += $(SYMLINK_HEADERS) $(ARCH_SYMLINKS) \
153 $(addprefix $(ARCH_DIR)/kernel/,$(KERN_SYMLINKS)) $(ARCH_DIR)/os 146 $(addprefix $(ARCH_DIR)/kernel/,$(KERN_SYMLINKS)) $(ARCH_DIR)/os
154 147
155archclean: 148archclean:
156 $(Q)$(MAKE) $(clean)=$(ARCH_DIR)/util
157 $(Q)$(MAKE) $(clean)=$(ARCH_DIR)/os-$(OS)/util
158 @find . \( -name '*.bb' -o -name '*.bbg' -o -name '*.da' \ 149 @find . \( -name '*.bb' -o -name '*.bbg' -o -name '*.da' \
159 -o -name '*.gcov' \) -type f -print | xargs rm -f 150 -o -name '*.gcov' \) -type f -print | xargs rm -f
160 151
@@ -180,9 +171,7 @@ $(ARCH_DIR)/include/sysdep:
180 @echo ' SYMLINK $@' 171 @echo ' SYMLINK $@'
181ifneq ($(KBUILD_SRC),) 172ifneq ($(KBUILD_SRC),)
182 $(Q)mkdir -p $(ARCH_DIR)/include 173 $(Q)mkdir -p $(ARCH_DIR)/include
183 $(Q)mkdir -p $(ARCH_DIR)/include2 174 $(Q)ln -fsn $(srctree)/$(ARCH_DIR)/include/sysdep-$(SUBARCH) $(ARCH_DIR)/include/sysdep
184 $(Q)ln -fsn sysdep-$(SUBARCH) $(ARCH_DIR)/include/sysdep
185 $(Q)ln -fsn $(srctree)/$(ARCH_DIR)/include/sysdep-$(SUBARCH) $(ARCH_DIR)/include2/sysdep
186else 175else
187 $(Q)cd $(ARCH_DIR)/include && ln -sf sysdep-$(SUBARCH) sysdep 176 $(Q)cd $(ARCH_DIR)/include && ln -sf sysdep-$(SUBARCH) sysdep
188endif 177endif
@@ -202,8 +191,6 @@ endef
202 191
203define filechk_gen-asm-offsets 192define filechk_gen-asm-offsets
204 (set -e; \ 193 (set -e; \
205 echo "#ifndef __ASM_OFFSETS_H__"; \
206 echo "#define __ASM_OFFSETS_H__"; \
207 echo "/*"; \ 194 echo "/*"; \
208 echo " * DO NOT MODIFY."; \ 195 echo " * DO NOT MODIFY."; \
209 echo " *"; \ 196 echo " *"; \
@@ -212,8 +199,7 @@ define filechk_gen-asm-offsets
212 echo " */"; \ 199 echo " */"; \
213 echo ""; \ 200 echo ""; \
214 sed -ne "/^->/{s:^->\([^ ]*\) [\$$#]*\([^ ]*\) \(.*\):#define \1 \2 /* \3 */:; s:->::; p;}"; \ 201 sed -ne "/^->/{s:^->\([^ ]*\) [\$$#]*\([^ ]*\) \(.*\):#define \1 \2 /* \3 */:; s:->::; p;}"; \
215 echo ""; \ 202 echo ""; )
216 echo "#endif" )
217endef 203endef
218 204
219$(ARCH_DIR)/include/uml-config.h : include/linux/autoconf.h 205$(ARCH_DIR)/include/uml-config.h : include/linux/autoconf.h
@@ -222,50 +208,18 @@ $(ARCH_DIR)/include/uml-config.h : include/linux/autoconf.h
222$(ARCH_DIR)/user-offsets.s: $(ARCH_DIR)/sys-$(SUBARCH)/user-offsets.c 208$(ARCH_DIR)/user-offsets.s: $(ARCH_DIR)/sys-$(SUBARCH)/user-offsets.c
223 $(CC) $(USER_CFLAGS) -S -o $@ $< 209 $(CC) $(USER_CFLAGS) -S -o $@ $<
224 210
225$(ARCH_DIR)/user-offsets.h: $(ARCH_DIR)/user-offsets.s 211$(ARCH_DIR)/include/user_constants.h: $(ARCH_DIR)/user-offsets.s
226 $(call filechk,gen-asm-offsets) 212 $(call filechk,gen-asm-offsets)
227 213
228CLEAN_FILES += $(ARCH_DIR)/user-offsets.s $(ARCH_DIR)/user-offsets.h 214CLEAN_FILES += $(ARCH_DIR)/user-offsets.s
229 215
230$(ARCH_DIR)/kernel-offsets.s: $(ARCH_DIR)/sys-$(SUBARCH)/kernel-offsets.c \ 216$(ARCH_DIR)/kernel-offsets.s: $(ARCH_DIR)/sys-$(SUBARCH)/kernel-offsets.c \
231 $(ARCH_SYMLINKS) \ 217 archprepare
232 $(SYS_DIR)/sc.h \
233 include/asm include/linux/version.h \
234 include/config/MARKER \
235 $(ARCH_DIR)/include/user_constants.h
236 $(CC) $(CFLAGS) $(NOSTDINC_FLAGS) $(CPPFLAGS) -S -o $@ $< 218 $(CC) $(CFLAGS) $(NOSTDINC_FLAGS) $(CPPFLAGS) -S -o $@ $<
237 219
238$(ARCH_DIR)/kernel-offsets.h: $(ARCH_DIR)/kernel-offsets.s 220$(ARCH_DIR)/include/kern_constants.h: $(ARCH_DIR)/kernel-offsets.s
239 $(call filechk,gen-asm-offsets) 221 $(call filechk,gen-asm-offsets)
240 222
241CLEAN_FILES += $(ARCH_DIR)/kernel-offsets.s $(ARCH_DIR)/kernel-offsets.h 223CLEAN_FILES += $(ARCH_DIR)/kernel-offsets.s
242
243$(ARCH_DIR)/include/task.h: $(ARCH_DIR)/util/mk_task
244 $(call filechk,gen_header)
245
246$(ARCH_DIR)/include/user_constants.h: $(ARCH_DIR)/os-$(OS)/util/mk_user_constants
247 $(call filechk,gen_header)
248
249$(ARCH_DIR)/include/kern_constants.h: $(ARCH_DIR)/util/mk_constants
250 $(call filechk,gen_header)
251
252$(ARCH_DIR)/include/skas_ptregs.h: $(ARCH_DIR)/kernel/skas/util/mk_ptregs
253 $(call filechk,gen_header)
254
255$(ARCH_DIR)/os-$(OS)/util/mk_user_constants: $(ARCH_DIR)/os-$(OS)/util FORCE ;
256
257$(ARCH_DIR)/util/mk_task $(ARCH_DIR)/util/mk_constants: $(ARCH_DIR)/include/user_constants.h $(ARCH_DIR)/util \
258 FORCE ;
259
260$(ARCH_DIR)/kernel/skas/util/mk_ptregs: $(ARCH_DIR)/kernel/skas/util FORCE ;
261
262$(ARCH_DIR)/util: scripts_basic $(SYS_DIR)/sc.h $(ARCH_DIR)/kernel-offsets.h FORCE
263 $(Q)$(MAKE) $(build)=$@
264
265$(ARCH_DIR)/kernel/skas/util: scripts_basic $(ARCH_DIR)/user-offsets.h FORCE
266 $(Q)$(MAKE) $(build)=$@
267
268$(ARCH_DIR)/os-$(OS)/util: scripts_basic $(ARCH_DIR)/user-offsets.h FORCE
269 $(Q)$(MAKE) $(build)=$@
270 224
271export SUBARCH USER_CFLAGS OS 225export SUBARCH USER_CFLAGS OS
diff --git a/arch/um/Makefile-i386 b/arch/um/Makefile-i386
index 1ab431a53ac3..2ee8a2858117 100644
--- a/arch/um/Makefile-i386
+++ b/arch/um/Makefile-i386
@@ -32,25 +32,3 @@ CFLAGS += -U__$(SUBARCH)__ -U$(SUBARCH)
32ifneq ($(CONFIG_GPROF),y) 32ifneq ($(CONFIG_GPROF),y)
33ARCH_CFLAGS += -DUM_FASTCALL 33ARCH_CFLAGS += -DUM_FASTCALL
34endif 34endif
35
36SYS_UTIL_DIR := $(ARCH_DIR)/sys-i386/util
37SYS_HEADERS := $(SYS_DIR)/sc.h $(SYS_DIR)/thread.h
38
39prepare: $(SYS_HEADERS)
40
41$(SYS_DIR)/sc.h: $(SYS_UTIL_DIR)/mk_sc
42 $(call filechk,gen_header)
43
44$(SYS_DIR)/thread.h: $(SYS_UTIL_DIR)/mk_thread
45 $(call filechk,gen_header)
46
47$(SYS_UTIL_DIR)/mk_sc: scripts_basic $(ARCH_DIR)/user-offsets.h FORCE
48 $(Q)$(MAKE) $(build)=$(SYS_UTIL_DIR) $@
49
50$(SYS_UTIL_DIR)/mk_thread: scripts_basic $(ARCH_DIR)/kernel-offsets.h FORCE
51 $(Q)$(MAKE) $(build)=$(SYS_UTIL_DIR) $@
52
53$(SYS_UTIL_DIR): scripts_basic include/asm FORCE
54 $(Q)$(MAKE) $(build)=$(SYS_UTIL_DIR)
55
56CLEAN_FILES += $(SYS_HEADERS)
diff --git a/arch/um/Makefile-skas b/arch/um/Makefile-skas
index fd18ec572271..ac35de5316a6 100644
--- a/arch/um/Makefile-skas
+++ b/arch/um/Makefile-skas
@@ -10,5 +10,3 @@ CFLAGS-$(CONFIG_GCOV) += $(GCOV_OPT)
10CFLAGS-$(CONFIG_GPROF) += $(GPROF_OPT) 10CFLAGS-$(CONFIG_GPROF) += $(GPROF_OPT)
11LINK-$(CONFIG_GCOV) += $(GCOV_OPT) 11LINK-$(CONFIG_GCOV) += $(GCOV_OPT)
12LINK-$(CONFIG_GPROF) += $(GPROF_OPT) 12LINK-$(CONFIG_GPROF) += $(GPROF_OPT)
13
14GEN_HEADERS += $(ARCH_DIR)/include/skas_ptregs.h
diff --git a/arch/um/Makefile-x86_64 b/arch/um/Makefile-x86_64
index 436abbba409b..4f118d5cc2ee 100644
--- a/arch/um/Makefile-x86_64
+++ b/arch/um/Makefile-x86_64
@@ -12,24 +12,3 @@ CHECKFLAGS += -m64
12 12
13ELF_ARCH := i386:x86-64 13ELF_ARCH := i386:x86-64
14ELF_FORMAT := elf64-x86-64 14ELF_FORMAT := elf64-x86-64
15
16SYS_UTIL_DIR := $(ARCH_DIR)/sys-x86_64/util
17SYS_DIR := $(ARCH_DIR)/include/sysdep-x86_64
18
19SYS_HEADERS = $(SYS_DIR)/sc.h $(SYS_DIR)/thread.h
20
21prepare: $(SYS_HEADERS)
22
23$(SYS_DIR)/sc.h: $(SYS_UTIL_DIR)/mk_sc
24 $(call filechk,gen_header)
25
26$(SYS_DIR)/thread.h: $(SYS_UTIL_DIR)/mk_thread
27 $(call filechk,gen_header)
28
29$(SYS_UTIL_DIR)/mk_sc: scripts_basic $(ARCH_DIR)/user-offsets.h FORCE
30 $(Q)$(MAKE) $(build)=$(SYS_UTIL_DIR) $@
31
32$(SYS_UTIL_DIR)/mk_thread: scripts_basic $(GEN_HEADERS) $(ARCH_DIR)/kernel-offsets.h FORCE
33 $(Q)$(MAKE) $(build)=$(SYS_UTIL_DIR) $@
34
35CLEAN_FILES += $(SYS_HEADERS)
diff --git a/arch/um/drivers/chan_kern.c b/arch/um/drivers/chan_kern.c
index 14a12d6b3df6..16e7dc89f61d 100644
--- a/arch/um/drivers/chan_kern.c
+++ b/arch/um/drivers/chan_kern.c
@@ -19,18 +19,44 @@
19#include "line.h" 19#include "line.h"
20#include "os.h" 20#include "os.h"
21 21
22#ifdef CONFIG_NOCONFIG_CHAN 22/* XXX: could well be moved to somewhere else, if needed. */
23static int my_printf(const char * fmt, ...)
24 __attribute__ ((format (printf, 1, 2)));
25
26static int my_printf(const char * fmt, ...)
27{
 28	/* Yes, this can be called in atomic context. */
29 char *buf = kmalloc(4096, GFP_ATOMIC);
30 va_list args;
31 int r;
32
33 if (!buf) {
 34		/* Fall back to printing fmt directly.
 35		 * Yes, yes, yes, feel free to complain. */
36 r = strlen(fmt);
37 } else {
38 va_start(args, fmt);
39 r = vsprintf(buf, fmt, args);
40 va_end(args);
41 fmt = buf;
42 }
23 43
24/* The printk's here are wrong because we are complaining that there is no 44 if (r)
25 * output device, but printk is printing to that output device. The user will 45 r = os_write_file(1, fmt, r);
26 * never see the error. printf would be better, except it can't run on a 46 return r;
27 * kernel stack because it will overflow it. 47
28 * Use printk for now since that will avoid crashing. 48}
29 */ 49
50#ifdef CONFIG_NOCONFIG_CHAN
51/* Despite its name, there's no added trailing newline. */
52static int my_puts(const char * buf)
53{
54 return os_write_file(1, buf, strlen(buf));
55}
30 56
31static void *not_configged_init(char *str, int device, struct chan_opts *opts) 57static void *not_configged_init(char *str, int device, struct chan_opts *opts)
32{ 58{
33 printk(KERN_ERR "Using a channel type which is configured out of " 59 my_puts("Using a channel type which is configured out of "
34 "UML\n"); 60 "UML\n");
35 return(NULL); 61 return(NULL);
36} 62}
@@ -38,27 +64,27 @@ static void *not_configged_init(char *str, int device, struct chan_opts *opts)
38static int not_configged_open(int input, int output, int primary, void *data, 64static int not_configged_open(int input, int output, int primary, void *data,
39 char **dev_out) 65 char **dev_out)
40{ 66{
41 printk(KERN_ERR "Using a channel type which is configured out of " 67 my_puts("Using a channel type which is configured out of "
42 "UML\n"); 68 "UML\n");
43 return(-ENODEV); 69 return(-ENODEV);
44} 70}
45 71
46static void not_configged_close(int fd, void *data) 72static void not_configged_close(int fd, void *data)
47{ 73{
48 printk(KERN_ERR "Using a channel type which is configured out of " 74 my_puts("Using a channel type which is configured out of "
49 "UML\n"); 75 "UML\n");
50} 76}
51 77
52static int not_configged_read(int fd, char *c_out, void *data) 78static int not_configged_read(int fd, char *c_out, void *data)
53{ 79{
54 printk(KERN_ERR "Using a channel type which is configured out of " 80 my_puts("Using a channel type which is configured out of "
55 "UML\n"); 81 "UML\n");
56 return(-EIO); 82 return(-EIO);
57} 83}
58 84
59static int not_configged_write(int fd, const char *buf, int len, void *data) 85static int not_configged_write(int fd, const char *buf, int len, void *data)
60{ 86{
61 printk(KERN_ERR "Using a channel type which is configured out of " 87 my_puts("Using a channel type which is configured out of "
62 "UML\n"); 88 "UML\n");
63 return(-EIO); 89 return(-EIO);
64} 90}
@@ -66,7 +92,7 @@ static int not_configged_write(int fd, const char *buf, int len, void *data)
66static int not_configged_console_write(int fd, const char *buf, int len, 92static int not_configged_console_write(int fd, const char *buf, int len,
67 void *data) 93 void *data)
68{ 94{
69 printk(KERN_ERR "Using a channel type which is configured out of " 95 my_puts("Using a channel type which is configured out of "
70 "UML\n"); 96 "UML\n");
71 return(-EIO); 97 return(-EIO);
72} 98}
@@ -74,14 +100,14 @@ static int not_configged_console_write(int fd, const char *buf, int len,
74static int not_configged_window_size(int fd, void *data, unsigned short *rows, 100static int not_configged_window_size(int fd, void *data, unsigned short *rows,
75 unsigned short *cols) 101 unsigned short *cols)
76{ 102{
77 printk(KERN_ERR "Using a channel type which is configured out of " 103 my_puts("Using a channel type which is configured out of "
78 "UML\n"); 104 "UML\n");
79 return(-ENODEV); 105 return(-ENODEV);
80} 106}
81 107
82static void not_configged_free(void *data) 108static void not_configged_free(void *data)
83{ 109{
84 printf(KERN_ERR "Using a channel type which is configured out of " 110 my_puts("Using a channel type which is configured out of "
85 "UML\n"); 111 "UML\n");
86} 112}
87 113
@@ -457,7 +483,7 @@ static struct chan *parse_chan(char *str, int pri, int device,
457 } 483 }
458 } 484 }
459 if(ops == NULL){ 485 if(ops == NULL){
460 printk(KERN_ERR "parse_chan couldn't parse \"%s\"\n", 486 my_printf("parse_chan couldn't parse \"%s\"\n",
461 str); 487 str);
462 return(NULL); 488 return(NULL);
463 } 489 }
@@ -465,7 +491,7 @@ static struct chan *parse_chan(char *str, int pri, int device,
465 data = (*ops->init)(str, device, opts); 491 data = (*ops->init)(str, device, opts);
466 if(data == NULL) return(NULL); 492 if(data == NULL) return(NULL);
467 493
468 chan = kmalloc(sizeof(*chan), GFP_KERNEL); 494 chan = kmalloc(sizeof(*chan), GFP_ATOMIC);
469 if(chan == NULL) return(NULL); 495 if(chan == NULL) return(NULL);
470 *chan = ((struct chan) { .list = LIST_HEAD_INIT(chan->list), 496 *chan = ((struct chan) { .list = LIST_HEAD_INIT(chan->list),
471 .primary = 1, 497 .primary = 1,
diff --git a/arch/um/drivers/port_kern.c b/arch/um/drivers/port_kern.c
index c41efd207fcc..189839e4f1d4 100644
--- a/arch/um/drivers/port_kern.c
+++ b/arch/um/drivers/port_kern.c
@@ -7,7 +7,6 @@
7#include "linux/sched.h" 7#include "linux/sched.h"
8#include "linux/slab.h" 8#include "linux/slab.h"
9#include "linux/interrupt.h" 9#include "linux/interrupt.h"
10#include "linux/irq.h"
11#include "linux/spinlock.h" 10#include "linux/spinlock.h"
12#include "linux/errno.h" 11#include "linux/errno.h"
13#include "asm/atomic.h" 12#include "asm/atomic.h"
diff --git a/arch/um/include/common-offsets.h b/arch/um/include/common-offsets.h
index 782ac3a3baf9..356390d1f8b9 100644
--- a/arch/um/include/common-offsets.h
+++ b/arch/um/include/common-offsets.h
@@ -1,7 +1,7 @@
1/* for use by sys-$SUBARCH/kernel-offsets.c */ 1/* for use by sys-$SUBARCH/kernel-offsets.c */
2 2
3OFFSET(TASK_REGS, task_struct, thread.regs); 3OFFSET(HOST_TASK_REGS, task_struct, thread.regs);
4OFFSET(TASK_PID, task_struct, pid); 4OFFSET(HOST_TASK_PID, task_struct, pid);
5DEFINE(UM_KERN_PAGE_SIZE, PAGE_SIZE); 5DEFINE(UM_KERN_PAGE_SIZE, PAGE_SIZE);
6DEFINE(UM_NSEC_PER_SEC, NSEC_PER_SEC); 6DEFINE(UM_NSEC_PER_SEC, NSEC_PER_SEC);
7DEFINE_STR(UM_KERN_EMERG, KERN_EMERG); 7DEFINE_STR(UM_KERN_EMERG, KERN_EMERG);
diff --git a/arch/um/include/os.h b/arch/um/include/os.h
index 583329d0a539..6f766e1faecc 100644
--- a/arch/um/include/os.h
+++ b/arch/um/include/os.h
@@ -157,6 +157,10 @@ extern int os_lock_file(int fd, int excl);
157extern void os_early_checks(void); 157extern void os_early_checks(void);
158extern int can_do_skas(void); 158extern int can_do_skas(void);
159 159
 160/* Make sure these are cleared when running in TT mode. Required by
 161 * SEGV_MAYBE_FIXABLE. */
162#define clear_can_do_skas() do { ptrace_faultinfo = proc_mm = 0; } while (0)
163
160/* mem.c */ 164/* mem.c */
161extern int create_mem_file(unsigned long len); 165extern int create_mem_file(unsigned long len);
162 166
diff --git a/arch/um/include/skas_ptregs.h b/arch/um/include/skas_ptregs.h
new file mode 100644
index 000000000000..73db19e9c077
--- /dev/null
+++ b/arch/um/include/skas_ptregs.h
@@ -0,0 +1,6 @@
1#ifndef __SKAS_PT_REGS_
2#define __SKAS_PT_REGS_
3
4#include <user_constants.h>
5
6#endif
diff --git a/arch/um/include/sysdep-i386/sc.h b/arch/um/include/sysdep-i386/sc.h
new file mode 100644
index 000000000000..c57d1780ad37
--- /dev/null
+++ b/arch/um/include/sysdep-i386/sc.h
@@ -0,0 +1,44 @@
1#ifndef __SYSDEP_I386_SC_H
2#define __SYSDEP_I386_SC_H
3
4#include <user_constants.h>
5
6#define SC_OFFSET(sc, field) \
7 *((unsigned long *) &(((char *) (sc))[HOST_##field]))
8#define SC_FP_OFFSET(sc, field) \
9 *((unsigned long *) &(((char *) (SC_FPSTATE(sc)))[HOST_##field]))
10#define SC_FP_OFFSET_PTR(sc, field, type) \
11 ((type *) &(((char *) (SC_FPSTATE(sc)))[HOST_##field]))
12
13#define SC_IP(sc) SC_OFFSET(sc, SC_IP)
14#define SC_SP(sc) SC_OFFSET(sc, SC_SP)
15#define SC_FS(sc) SC_OFFSET(sc, SC_FS)
16#define SC_GS(sc) SC_OFFSET(sc, SC_GS)
17#define SC_DS(sc) SC_OFFSET(sc, SC_DS)
18#define SC_ES(sc) SC_OFFSET(sc, SC_ES)
19#define SC_SS(sc) SC_OFFSET(sc, SC_SS)
20#define SC_CS(sc) SC_OFFSET(sc, SC_CS)
21#define SC_EFLAGS(sc) SC_OFFSET(sc, SC_EFLAGS)
22#define SC_EAX(sc) SC_OFFSET(sc, SC_EAX)
23#define SC_EBX(sc) SC_OFFSET(sc, SC_EBX)
24#define SC_ECX(sc) SC_OFFSET(sc, SC_ECX)
25#define SC_EDX(sc) SC_OFFSET(sc, SC_EDX)
26#define SC_EDI(sc) SC_OFFSET(sc, SC_EDI)
27#define SC_ESI(sc) SC_OFFSET(sc, SC_ESI)
28#define SC_EBP(sc) SC_OFFSET(sc, SC_EBP)
29#define SC_TRAPNO(sc) SC_OFFSET(sc, SC_TRAPNO)
30#define SC_ERR(sc) SC_OFFSET(sc, SC_ERR)
31#define SC_CR2(sc) SC_OFFSET(sc, SC_CR2)
32#define SC_FPSTATE(sc) SC_OFFSET(sc, SC_FPSTATE)
33#define SC_SIGMASK(sc) SC_OFFSET(sc, SC_SIGMASK)
34#define SC_FP_CW(sc) SC_FP_OFFSET(sc, SC_FP_CW)
35#define SC_FP_SW(sc) SC_FP_OFFSET(sc, SC_FP_SW)
36#define SC_FP_TAG(sc) SC_FP_OFFSET(sc, SC_FP_TAG)
37#define SC_FP_IPOFF(sc) SC_FP_OFFSET(sc, SC_FP_IPOFF)
38#define SC_FP_CSSEL(sc) SC_FP_OFFSET(sc, SC_FP_CSSEL)
39#define SC_FP_DATAOFF(sc) SC_FP_OFFSET(sc, SC_FP_DATAOFF)
40#define SC_FP_DATASEL(sc) SC_FP_OFFSET(sc, SC_FP_DATASEL)
41#define SC_FP_ST(sc) SC_FP_OFFSET_PTR(sc, SC_FP_ST, struct _fpstate)
42#define SC_FXSR_ENV(sc) SC_FP_OFFSET_PTR(sc, SC_FXSR_ENV, void)
43
44#endif
diff --git a/arch/um/include/sysdep-i386/sigcontext.h b/arch/um/include/sysdep-i386/sigcontext.h
index 1fe729265167..23fd2644d7ed 100644
--- a/arch/um/include/sysdep-i386/sigcontext.h
+++ b/arch/um/include/sysdep-i386/sigcontext.h
@@ -6,6 +6,7 @@
6#ifndef __SYS_SIGCONTEXT_I386_H 6#ifndef __SYS_SIGCONTEXT_I386_H
7#define __SYS_SIGCONTEXT_I386_H 7#define __SYS_SIGCONTEXT_I386_H
8 8
9#include "uml-config.h"
9#include <sysdep/sc.h> 10#include <sysdep/sc.h>
10 11
11#define IP_RESTART_SYSCALL(ip) ((ip) -= 2) 12#define IP_RESTART_SYSCALL(ip) ((ip) -= 2)
@@ -26,7 +27,14 @@
26#define SC_START_SYSCALL(sc) do SC_EAX(sc) = -ENOSYS; while(0) 27#define SC_START_SYSCALL(sc) do SC_EAX(sc) = -ENOSYS; while(0)
27 28
28/* This is Page Fault */ 29/* This is Page Fault */
29#define SEGV_IS_FIXABLE(fi) ((fi)->trap_no == 14) 30#define SEGV_IS_FIXABLE(fi) ((fi)->trap_no == 14)
31
32/* SKAS3 has no trap_no on i386, but get_skas_faultinfo() sets it to 0. */
33#ifdef UML_CONFIG_MODE_SKAS
34#define SEGV_MAYBE_FIXABLE(fi) ((fi)->trap_no == 0 && ptrace_faultinfo)
35#else
36#define SEGV_MAYBE_FIXABLE(fi) 0
37#endif
30 38
31extern unsigned long *sc_sigmask(void *sc_ptr); 39extern unsigned long *sc_sigmask(void *sc_ptr);
32extern int sc_get_fpregs(unsigned long buf, void *sc_ptr); 40extern int sc_get_fpregs(unsigned long buf, void *sc_ptr);
diff --git a/arch/um/include/sysdep-i386/thread.h b/arch/um/include/sysdep-i386/thread.h
new file mode 100644
index 000000000000..e2bd6bae8b8a
--- /dev/null
+++ b/arch/um/include/sysdep-i386/thread.h
@@ -0,0 +1,11 @@
1#ifndef __UM_THREAD_H
2#define __UM_THREAD_H
3
4#include <kern_constants.h>
5
6#define TASK_DEBUGREGS(task) ((unsigned long *) &(((char *) (task))[HOST_TASK_DEBUGREGS]))
7#ifdef CONFIG_MODE_TT
8#define TASK_EXTERN_PID(task) *((int *) &(((char *) (task))[HOST_TASK_EXTERN_PID]))
9#endif
10
11#endif
diff --git a/arch/um/include/sysdep-x86_64/sc.h b/arch/um/include/sysdep-x86_64/sc.h
new file mode 100644
index 000000000000..a160d9fcc596
--- /dev/null
+++ b/arch/um/include/sysdep-x86_64/sc.h
@@ -0,0 +1,45 @@
1#ifndef __SYSDEP_X86_64_SC_H
2#define __SYSDEP_X86_64_SC_H
3
4/* Copyright (C) 2003 - 2004 PathScale, Inc
5 * Released under the GPL
6 */
7
8#include <user_constants.h>
9
10#define SC_OFFSET(sc, field) \
11 *((unsigned long *) &(((char *) (sc))[HOST_##field]))
12
13#define SC_RBX(sc) SC_OFFSET(sc, SC_RBX)
14#define SC_RCX(sc) SC_OFFSET(sc, SC_RCX)
15#define SC_RDX(sc) SC_OFFSET(sc, SC_RDX)
16#define SC_RSI(sc) SC_OFFSET(sc, SC_RSI)
17#define SC_RDI(sc) SC_OFFSET(sc, SC_RDI)
18#define SC_RBP(sc) SC_OFFSET(sc, SC_RBP)
19#define SC_RAX(sc) SC_OFFSET(sc, SC_RAX)
20#define SC_R8(sc) SC_OFFSET(sc, SC_R8)
21#define SC_R9(sc) SC_OFFSET(sc, SC_R9)
22#define SC_R10(sc) SC_OFFSET(sc, SC_R10)
23#define SC_R11(sc) SC_OFFSET(sc, SC_R11)
24#define SC_R12(sc) SC_OFFSET(sc, SC_R12)
25#define SC_R13(sc) SC_OFFSET(sc, SC_R13)
26#define SC_R14(sc) SC_OFFSET(sc, SC_R14)
27#define SC_R15(sc) SC_OFFSET(sc, SC_R15)
28#define SC_IP(sc) SC_OFFSET(sc, SC_IP)
29#define SC_SP(sc) SC_OFFSET(sc, SC_SP)
30#define SC_CR2(sc) SC_OFFSET(sc, SC_CR2)
31#define SC_ERR(sc) SC_OFFSET(sc, SC_ERR)
32#define SC_TRAPNO(sc) SC_OFFSET(sc, SC_TRAPNO)
33#define SC_CS(sc) SC_OFFSET(sc, SC_CS)
34#define SC_FS(sc) SC_OFFSET(sc, SC_FS)
35#define SC_GS(sc) SC_OFFSET(sc, SC_GS)
36#define SC_EFLAGS(sc) SC_OFFSET(sc, SC_EFLAGS)
37#define SC_SIGMASK(sc) SC_OFFSET(sc, SC_SIGMASK)
38#if 0
39#define SC_ORIG_RAX(sc) SC_OFFSET(sc, SC_ORIG_RAX)
40#define SC_DS(sc) SC_OFFSET(sc, SC_DS)
41#define SC_ES(sc) SC_OFFSET(sc, SC_ES)
42#define SC_SS(sc) SC_OFFSET(sc, SC_SS)
43#endif
44
45#endif
diff --git a/arch/um/include/sysdep-x86_64/sigcontext.h b/arch/um/include/sysdep-x86_64/sigcontext.h
index 2a78260d15a0..41073235e7ad 100644
--- a/arch/um/include/sysdep-x86_64/sigcontext.h
+++ b/arch/um/include/sysdep-x86_64/sigcontext.h
@@ -31,7 +31,10 @@
31#define SC_START_SYSCALL(sc) do SC_RAX(sc) = -ENOSYS; while(0) 31#define SC_START_SYSCALL(sc) do SC_RAX(sc) = -ENOSYS; while(0)
32 32
33/* This is Page Fault */ 33/* This is Page Fault */
34#define SEGV_IS_FIXABLE(fi) ((fi)->trap_no == 14) 34#define SEGV_IS_FIXABLE(fi) ((fi)->trap_no == 14)
35
36/* No broken SKAS API, which doesn't pass trap_no, here. */
37#define SEGV_MAYBE_FIXABLE(fi) 0
35 38
36extern unsigned long *sc_sigmask(void *sc_ptr); 39extern unsigned long *sc_sigmask(void *sc_ptr);
37 40
diff --git a/arch/um/include/sysdep-x86_64/thread.h b/arch/um/include/sysdep-x86_64/thread.h
new file mode 100644
index 000000000000..6a76a7f3683f
--- /dev/null
+++ b/arch/um/include/sysdep-x86_64/thread.h
@@ -0,0 +1,10 @@
1#ifndef __UM_THREAD_H
2#define __UM_THREAD_H
3
4#include <kern_constants.h>
5
6#ifdef CONFIG_MODE_TT
7#define TASK_EXTERN_PID(task) *((int *) &(((char *) (task))[HOST_TASK_EXTERN_PID]))
8#endif
9
10#endif
diff --git a/arch/um/include/task.h b/arch/um/include/task.h
new file mode 100644
index 000000000000..6375ba7203c9
--- /dev/null
+++ b/arch/um/include/task.h
@@ -0,0 +1,9 @@
1#ifndef __TASK_H
2#define __TASK_H
3
4#include <kern_constants.h>
5
6#define TASK_REGS(task) ((union uml_pt_regs *) &(((char *) (task))[HOST_TASK_REGS]))
7#define TASK_PID(task) *((int *) &(((char *) (task))[HOST_TASK_PID]))
8
9#endif
diff --git a/arch/um/include/user.h b/arch/um/include/user.h
index 57ee9e261228..0f865ef46918 100644
--- a/arch/um/include/user.h
+++ b/arch/um/include/user.h
@@ -14,7 +14,9 @@ extern void *um_kmalloc_atomic(int size);
14extern void kfree(void *ptr); 14extern void kfree(void *ptr);
15extern int in_aton(char *str); 15extern int in_aton(char *str);
16extern int open_gdb_chan(void); 16extern int open_gdb_chan(void);
 17extern int strlcpy(char *, const char *, int); 17/* These use size_t, but unsigned long is correct on both i386 and x86_64. */
18extern unsigned long strlcpy(char *, const char *, unsigned long);
19extern unsigned long strlcat(char *, const char *, unsigned long);
18extern void *um_vmalloc(int size); 20extern void *um_vmalloc(int size);
19extern void vfree(void *ptr); 21extern void vfree(void *ptr);
20 22
diff --git a/arch/um/kernel/irq.c b/arch/um/kernel/irq.c
index dcd814971995..bbf94bf2921e 100644
--- a/arch/um/kernel/irq.c
+++ b/arch/um/kernel/irq.c
@@ -9,7 +9,6 @@
9#include "linux/kernel.h" 9#include "linux/kernel.h"
10#include "linux/module.h" 10#include "linux/module.h"
11#include "linux/smp.h" 11#include "linux/smp.h"
12#include "linux/irq.h"
13#include "linux/kernel_stat.h" 12#include "linux/kernel_stat.h"
14#include "linux/interrupt.h" 13#include "linux/interrupt.h"
15#include "linux/random.h" 14#include "linux/random.h"
diff --git a/arch/um/kernel/process_kern.c b/arch/um/kernel/process_kern.c
index 39cf568ccfaf..ea65db679e9c 100644
--- a/arch/um/kernel/process_kern.c
+++ b/arch/um/kernel/process_kern.c
@@ -82,7 +82,8 @@ unsigned long alloc_stack(int order, int atomic)
82 unsigned long page; 82 unsigned long page;
83 int flags = GFP_KERNEL; 83 int flags = GFP_KERNEL;
84 84
85 if(atomic) flags |= GFP_ATOMIC; 85 if (atomic)
86 flags = GFP_ATOMIC;
86 page = __get_free_pages(flags, order); 87 page = __get_free_pages(flags, order);
87 if(page == 0) 88 if(page == 0)
88 return(0); 89 return(0);
diff --git a/arch/um/kernel/sigio_user.c b/arch/um/kernel/sigio_user.c
index e89218958f38..a52751108aa1 100644
--- a/arch/um/kernel/sigio_user.c
+++ b/arch/um/kernel/sigio_user.c
@@ -340,7 +340,7 @@ static int setup_initial_poll(int fd)
340{ 340{
341 struct pollfd *p; 341 struct pollfd *p;
342 342
343 p = um_kmalloc(sizeof(struct pollfd)); 343 p = um_kmalloc_atomic(sizeof(struct pollfd));
344 if(p == NULL){ 344 if(p == NULL){
345 printk("setup_initial_poll : failed to allocate poll\n"); 345 printk("setup_initial_poll : failed to allocate poll\n");
346 return(-1); 346 return(-1);
diff --git a/arch/um/kernel/skas/Makefile b/arch/um/kernel/skas/Makefile
index db36c7c95940..8de471b59c1c 100644
--- a/arch/um/kernel/skas/Makefile
+++ b/arch/um/kernel/skas/Makefile
@@ -6,8 +6,6 @@
6obj-y := clone.o exec_kern.o mem.o mem_user.o mmu.o process.o process_kern.o \ 6obj-y := clone.o exec_kern.o mem.o mem_user.o mmu.o process.o process_kern.o \
7 syscall.o tlb.o trap_user.o uaccess.o 7 syscall.o tlb.o trap_user.o uaccess.o
8 8
9subdir- := util
10
11USER_OBJS := process.o clone.o 9USER_OBJS := process.o clone.o
12 10
13include arch/um/scripts/Makefile.rules 11include arch/um/scripts/Makefile.rules
diff --git a/arch/um/kernel/skas/util/Makefile b/arch/um/kernel/skas/util/Makefile
deleted file mode 100644
index f7b7eba83340..000000000000
--- a/arch/um/kernel/skas/util/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
1hostprogs-y := mk_ptregs
2always := $(hostprogs-y)
3
4mk_ptregs-objs := mk_ptregs-$(SUBARCH).o
5HOSTCFLAGS_mk_ptregs-$(SUBARCH).o := -I$(objtree)/arch/um
diff --git a/arch/um/kernel/skas/util/mk_ptregs-i386.c b/arch/um/kernel/skas/util/mk_ptregs-i386.c
deleted file mode 100644
index 1f96e1eeb8a7..000000000000
--- a/arch/um/kernel/skas/util/mk_ptregs-i386.c
+++ /dev/null
@@ -1,49 +0,0 @@
1#include <stdio.h>
2#include <user-offsets.h>
3
4#define SHOW(name) printf("#define %s %d\n", #name, name)
5
6int main(int argc, char **argv)
7{
8 printf("/* Automatically generated by "
9 "arch/um/kernel/skas/util/mk_ptregs */\n");
10 printf("\n");
11 printf("#ifndef __SKAS_PT_REGS_\n");
12 printf("#define __SKAS_PT_REGS_\n");
13 printf("\n");
14 SHOW(HOST_FRAME_SIZE);
15 SHOW(HOST_FP_SIZE);
16 SHOW(HOST_XFP_SIZE);
17
18 SHOW(HOST_IP);
19 SHOW(HOST_SP);
20 SHOW(HOST_EFLAGS);
21 SHOW(HOST_EAX);
22 SHOW(HOST_EBX);
23 SHOW(HOST_ECX);
24 SHOW(HOST_EDX);
25 SHOW(HOST_ESI);
26 SHOW(HOST_EDI);
27 SHOW(HOST_EBP);
28 SHOW(HOST_CS);
29 SHOW(HOST_SS);
30 SHOW(HOST_DS);
31 SHOW(HOST_FS);
32 SHOW(HOST_ES);
33 SHOW(HOST_GS);
34
35 printf("\n");
36 printf("#endif\n");
37 return(0);
38}
39
40/*
41 * Overrides for Emacs so that we follow Linus's tabbing style.
42 * Emacs will notice this stuff at the end of the file and automatically
43 * adjust the settings for this buffer only. This must remain at the end
44 * of the file.
45 * ---------------------------------------------------------------------------
46 * Local variables:
47 * c-file-style: "linux"
48 * End:
49 */
diff --git a/arch/um/kernel/skas/util/mk_ptregs-x86_64.c b/arch/um/kernel/skas/util/mk_ptregs-x86_64.c
deleted file mode 100644
index 5fccbfe35f78..000000000000
--- a/arch/um/kernel/skas/util/mk_ptregs-x86_64.c
+++ /dev/null
@@ -1,66 +0,0 @@
1/*
2 * Copyright 2003 PathScale, Inc.
3 *
4 * Licensed under the GPL
5 */
6
7#include <stdio.h>
8#include <user-offsets.h>
9
10#define SHOW(name) \
11 printf("#define %s (%d / sizeof(unsigned long))\n", #name, name)
12
13int main(int argc, char **argv)
14{
15 printf("/* Automatically generated by "
16 "arch/um/kernel/skas/util/mk_ptregs */\n");
17 printf("\n");
18 printf("#ifndef __SKAS_PT_REGS_\n");
19 printf("#define __SKAS_PT_REGS_\n");
20 SHOW(HOST_FRAME_SIZE);
21 SHOW(HOST_RBX);
22 SHOW(HOST_RCX);
23 SHOW(HOST_RDI);
24 SHOW(HOST_RSI);
25 SHOW(HOST_RDX);
26 SHOW(HOST_RBP);
27 SHOW(HOST_RAX);
28 SHOW(HOST_R8);
29 SHOW(HOST_R9);
30 SHOW(HOST_R10);
31 SHOW(HOST_R11);
32 SHOW(HOST_R12);
33 SHOW(HOST_R13);
34 SHOW(HOST_R14);
35 SHOW(HOST_R15);
36 SHOW(HOST_ORIG_RAX);
37 SHOW(HOST_CS);
38 SHOW(HOST_SS);
39 SHOW(HOST_EFLAGS);
40#if 0
41 SHOW(HOST_FS);
42 SHOW(HOST_GS);
43 SHOW(HOST_DS);
44 SHOW(HOST_ES);
45#endif
46
47 SHOW(HOST_IP);
48 SHOW(HOST_SP);
49 printf("#define HOST_FP_SIZE 0\n");
50 printf("#define HOST_XFP_SIZE 0\n");
51 printf("\n");
52 printf("\n");
53 printf("#endif\n");
54 return(0);
55}
56
57/*
58 * Overrides for Emacs so that we follow Linus's tabbing style.
59 * Emacs will notice this stuff at the end of the file and automatically
60 * adjust the settings for this buffer only. This must remain at the end
61 * of the file.
62 * ---------------------------------------------------------------------------
63 * Local variables:
64 * c-file-style: "linux"
65 * End:
66 */
diff --git a/arch/um/kernel/tlb.c b/arch/um/kernel/tlb.c
index 0a562c3c0fd8..f5b0636f9ad7 100644
--- a/arch/um/kernel/tlb.c
+++ b/arch/um/kernel/tlb.c
@@ -193,12 +193,12 @@ void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
193 r = pte_read(*npte); 193 r = pte_read(*npte);
194 w = pte_write(*npte); 194 w = pte_write(*npte);
195 x = pte_exec(*npte); 195 x = pte_exec(*npte);
196 if(!pte_dirty(*npte)) 196 if (!pte_young(*npte)) {
197 w = 0; 197 r = 0;
198 if(!pte_young(*npte)){ 198 w = 0;
199 r = 0; 199 } else if (!pte_dirty(*npte)) {
200 w = 0; 200 w = 0;
201 } 201 }
202 if(force || pte_newpage(*npte)){ 202 if(force || pte_newpage(*npte)){
203 if(pte_present(*npte)) 203 if(pte_present(*npte))
204 ret = add_mmap(addr, 204 ret = add_mmap(addr,
diff --git a/arch/um/kernel/trap_kern.c b/arch/um/kernel/trap_kern.c
index 87cc6fd76ced..95c8f8733baf 100644
--- a/arch/um/kernel/trap_kern.c
+++ b/arch/um/kernel/trap_kern.c
@@ -18,6 +18,7 @@
18#include "asm/a.out.h" 18#include "asm/a.out.h"
19#include "asm/current.h" 19#include "asm/current.h"
20#include "asm/irq.h" 20#include "asm/irq.h"
21#include "sysdep/sigcontext.h"
21#include "user_util.h" 22#include "user_util.h"
22#include "kern_util.h" 23#include "kern_util.h"
23#include "kern.h" 24#include "kern.h"
@@ -25,6 +26,9 @@
25#include "mconsole_kern.h" 26#include "mconsole_kern.h"
26#include "mem.h" 27#include "mem.h"
27#include "mem_kern.h" 28#include "mem_kern.h"
29#ifdef CONFIG_MODE_SKAS
30#include "skas.h"
31#endif
28 32
29/* Note this is constrained to return 0, -EFAULT, -EACCESS, -ENOMEM by segv(). */ 33/* Note this is constrained to return 0, -EFAULT, -EACCESS, -ENOMEM by segv(). */
30int handle_page_fault(unsigned long address, unsigned long ip, 34int handle_page_fault(unsigned long address, unsigned long ip,
@@ -39,6 +43,12 @@ int handle_page_fault(unsigned long address, unsigned long ip,
39 int err = -EFAULT; 43 int err = -EFAULT;
40 44
41 *code_out = SEGV_MAPERR; 45 *code_out = SEGV_MAPERR;
46
 47 	/* If the fault happened during an atomic operation, don't take the
 48 	 * fault, just fail. */
49 if (in_atomic())
50 goto out_nosemaphore;
51
42 down_read(&mm->mmap_sem); 52 down_read(&mm->mmap_sem);
43 vma = find_vma(mm, address); 53 vma = find_vma(mm, address);
44 if(!vma) 54 if(!vma)
@@ -89,6 +99,7 @@ survive:
89 flush_tlb_page(vma, address); 99 flush_tlb_page(vma, address);
90out: 100out:
91 up_read(&mm->mmap_sem); 101 up_read(&mm->mmap_sem);
102out_nosemaphore:
92 return(err); 103 return(err);
93 104
94/* 105/*
@@ -125,7 +136,15 @@ unsigned long segv(struct faultinfo fi, unsigned long ip, int is_user, void *sc)
125 } 136 }
126 else if(current->mm == NULL) 137 else if(current->mm == NULL)
127 panic("Segfault with no mm"); 138 panic("Segfault with no mm");
128 err = handle_page_fault(address, ip, is_write, is_user, &si.si_code); 139
140 if (SEGV_IS_FIXABLE(&fi) || SEGV_MAYBE_FIXABLE(&fi))
141 err = handle_page_fault(address, ip, is_write, is_user, &si.si_code);
142 else {
143 err = -EFAULT;
 144		/* A thread accessed NULL; we get a fault, but CR2 is invalid.
 145		 * This path is used by __do_copy_from_user() in TT mode. */
146 address = 0;
147 }
129 148
130 catcher = current->thread.fault_catcher; 149 catcher = current->thread.fault_catcher;
131 if(!err) 150 if(!err)
diff --git a/arch/um/kernel/tt/uaccess_user.c b/arch/um/kernel/tt/uaccess_user.c
index f01475512ecb..8c220f054b61 100644
--- a/arch/um/kernel/tt/uaccess_user.c
+++ b/arch/um/kernel/tt/uaccess_user.c
@@ -22,8 +22,15 @@ int __do_copy_from_user(void *to, const void *from, int n,
22 __do_copy, &faulted); 22 __do_copy, &faulted);
23 TASK_REGS(get_current())->tt = save; 23 TASK_REGS(get_current())->tt = save;
24 24
25 if(!faulted) return(0); 25 if(!faulted)
26 else return(n - (fault - (unsigned long) from)); 26 return 0;
27 else if (fault)
28 return n - (fault - (unsigned long) from);
29 else
30 /* In case of a general protection fault, we don't have the
31 * fault address, so NULL is used instead. Pretend we didn't
32 * copy anything. */
33 return n;
27} 34}
28 35
29static void __do_strncpy(void *dst, const void *src, int count) 36static void __do_strncpy(void *dst, const void *src, int count)
diff --git a/arch/um/kernel/um_arch.c b/arch/um/kernel/um_arch.c
index f0a275947d34..93dc782dc1cc 100644
--- a/arch/um/kernel/um_arch.c
+++ b/arch/um/kernel/um_arch.c
@@ -334,6 +334,8 @@ int linux_main(int argc, char **argv)
334 add_arg(DEFAULT_COMMAND_LINE); 334 add_arg(DEFAULT_COMMAND_LINE);
335 335
336 os_early_checks(); 336 os_early_checks();
337 if (force_tt)
338 clear_can_do_skas();
337 mode_tt = force_tt ? 1 : !can_do_skas(); 339 mode_tt = force_tt ? 1 : !can_do_skas();
338#ifndef CONFIG_MODE_TT 340#ifndef CONFIG_MODE_TT
339 if (mode_tt) { 341 if (mode_tt) {
diff --git a/arch/um/kernel/umid.c b/arch/um/kernel/umid.c
index 186c28885016..0b21d59ba0cd 100644
--- a/arch/um/kernel/umid.c
+++ b/arch/um/kernel/umid.c
@@ -31,6 +31,8 @@ static char *uml_dir = UML_DIR;
31/* Changed by set_umid */ 31/* Changed by set_umid */
32static int umid_is_random = 1; 32static int umid_is_random = 1;
33static int umid_inited = 0; 33static int umid_inited = 0;
34/* Have we created the files? Should we remove them? */
35static int umid_owned = 0;
34 36
35static int make_umid(int (*printer)(const char *fmt, ...)); 37static int make_umid(int (*printer)(const char *fmt, ...));
36 38
@@ -82,20 +84,21 @@ int __init umid_file_name(char *name, char *buf, int len)
82 84
83extern int tracing_pid; 85extern int tracing_pid;
84 86
85static int __init create_pid_file(void) 87static void __init create_pid_file(void)
86{ 88{
87 char file[strlen(uml_dir) + UMID_LEN + sizeof("/pid\0")]; 89 char file[strlen(uml_dir) + UMID_LEN + sizeof("/pid\0")];
88 char pid[sizeof("nnnnn\0")]; 90 char pid[sizeof("nnnnn\0")];
89 int fd, n; 91 int fd, n;
90 92
91 if(umid_file_name("pid", file, sizeof(file))) return 0; 93 if(umid_file_name("pid", file, sizeof(file)))
94 return;
92 95
93 fd = os_open_file(file, of_create(of_excl(of_rdwr(OPENFLAGS()))), 96 fd = os_open_file(file, of_create(of_excl(of_rdwr(OPENFLAGS()))),
94 0644); 97 0644);
95 if(fd < 0){ 98 if(fd < 0){
96 printf("Open of machine pid file \"%s\" failed: %s\n", 99 printf("Open of machine pid file \"%s\" failed: %s\n",
97 file, strerror(-fd)); 100 file, strerror(-fd));
98 return 0; 101 return;
99 } 102 }
100 103
101 sprintf(pid, "%d\n", os_getpid()); 104 sprintf(pid, "%d\n", os_getpid());
@@ -103,7 +106,6 @@ static int __init create_pid_file(void)
103 if(n != strlen(pid)) 106 if(n != strlen(pid))
104 printf("Write of pid file failed - err = %d\n", -n); 107 printf("Write of pid file failed - err = %d\n", -n);
105 os_close_file(fd); 108 os_close_file(fd);
106 return 0;
107} 109}
108 110
109static int actually_do_remove(char *dir) 111static int actually_do_remove(char *dir)
@@ -147,7 +149,8 @@ static int actually_do_remove(char *dir)
147void remove_umid_dir(void) 149void remove_umid_dir(void)
148{ 150{
149 char dir[strlen(uml_dir) + UMID_LEN + 1]; 151 char dir[strlen(uml_dir) + UMID_LEN + 1];
150 if(!umid_inited) return; 152 if (!umid_owned)
153 return;
151 154
152 sprintf(dir, "%s%s", uml_dir, umid); 155 sprintf(dir, "%s%s", uml_dir, umid);
153 actually_do_remove(dir); 156 actually_do_remove(dir);
@@ -155,11 +158,12 @@ void remove_umid_dir(void)
155 158
156char *get_umid(int only_if_set) 159char *get_umid(int only_if_set)
157{ 160{
158 if(only_if_set && umid_is_random) return(NULL); 161 if(only_if_set && umid_is_random)
159 return(umid); 162 return NULL;
163 return umid;
160} 164}
161 165
162int not_dead_yet(char *dir) 166static int not_dead_yet(char *dir)
163{ 167{
164 char file[strlen(uml_dir) + UMID_LEN + sizeof("/pid\0")]; 168 char file[strlen(uml_dir) + UMID_LEN + sizeof("/pid\0")];
165 char pid[sizeof("nnnnn\0")], *end; 169 char pid[sizeof("nnnnn\0")], *end;
@@ -193,7 +197,8 @@ int not_dead_yet(char *dir)
193 (p == CHOOSE_MODE(tracing_pid, os_getpid()))) 197 (p == CHOOSE_MODE(tracing_pid, os_getpid())))
194 dead = 1; 198 dead = 1;
195 } 199 }
196 if(!dead) return(1); 200 if(!dead)
201 return(1);
197 return(actually_do_remove(dir)); 202 return(actually_do_remove(dir));
198} 203}
199 204
@@ -232,16 +237,13 @@ static int __init make_uml_dir(void)
232 strlcpy(dir, home, sizeof(dir)); 237 strlcpy(dir, home, sizeof(dir));
233 uml_dir++; 238 uml_dir++;
234 } 239 }
240 strlcat(dir, uml_dir, sizeof(dir));
235 len = strlen(dir); 241 len = strlen(dir);
236 strncat(dir, uml_dir, sizeof(dir) - len); 242 if (len > 0 && dir[len - 1] != '/')
237 len = strlen(dir); 243 strlcat(dir, "/", sizeof(dir));
238 if((len > 0) && (len < sizeof(dir) - 1) && (dir[len - 1] != '/')){
239 dir[len] = '/';
240 dir[len + 1] = '\0';
241 }
242 244
243 uml_dir = malloc(strlen(dir) + 1); 245 uml_dir = malloc(strlen(dir) + 1);
244 if(uml_dir == NULL){ 246 if (uml_dir == NULL) {
245 printf("make_uml_dir : malloc failed, errno = %d\n", errno); 247 printf("make_uml_dir : malloc failed, errno = %d\n", errno);
246 exit(1); 248 exit(1);
247 } 249 }
@@ -286,6 +288,7 @@ static int __init make_umid(int (*printer)(const char *fmt, ...))
286 if(errno == EEXIST){ 288 if(errno == EEXIST){
287 if(not_dead_yet(tmp)){ 289 if(not_dead_yet(tmp)){
288 (*printer)("umid '%s' is in use\n", umid); 290 (*printer)("umid '%s' is in use\n", umid);
291 umid_owned = 0;
289 return(-1); 292 return(-1);
290 } 293 }
291 err = mkdir(tmp, 0777); 294 err = mkdir(tmp, 0777);
@@ -296,7 +299,8 @@ static int __init make_umid(int (*printer)(const char *fmt, ...))
296 return(-1); 299 return(-1);
297 } 300 }
298 301
299 return(0); 302 umid_owned = 1;
303 return 0;
300} 304}
301 305
302__uml_setup("uml_dir=", set_uml_dir, 306__uml_setup("uml_dir=", set_uml_dir,
@@ -309,7 +313,8 @@ static int __init make_umid_setup(void)
309 /* one function with the ordering we need ... */ 313 /* one function with the ordering we need ... */
310 make_uml_dir(); 314 make_uml_dir();
311 make_umid(printf); 315 make_umid(printf);
312 return create_pid_file(); 316 create_pid_file();
317 return 0;
313} 318}
314__uml_postsetup(make_umid_setup); 319__uml_postsetup(make_umid_setup);
315 320
diff --git a/arch/um/os-Linux/elf_aux.c b/arch/um/os-Linux/elf_aux.c
index ab33cb3c74ec..5a99dd3fbed0 100644
--- a/arch/um/os-Linux/elf_aux.c
+++ b/arch/um/os-Linux/elf_aux.c
@@ -12,7 +12,7 @@
12#include "init.h" 12#include "init.h"
13#include "elf_user.h" 13#include "elf_user.h"
14#include "mem_user.h" 14#include "mem_user.h"
15#include <kernel-offsets.h> 15#include <kern_constants.h>
16 16
17/* Use the one from the kernel - the host may miss it, if having old headers. */ 17/* Use the one from the kernel - the host may miss it, if having old headers. */
18#if UM_ELF_CLASS == UM_ELFCLASS32 18#if UM_ELF_CLASS == UM_ELFCLASS32
diff --git a/arch/um/os-Linux/util/Makefile b/arch/um/os-Linux/util/Makefile
deleted file mode 100644
index 9778aed0c314..000000000000
--- a/arch/um/os-Linux/util/Makefile
+++ /dev/null
@@ -1,4 +0,0 @@
1hostprogs-y := mk_user_constants
2always := $(hostprogs-y)
3
4HOSTCFLAGS_mk_user_constants.o := -I$(objtree)/arch/um
diff --git a/arch/um/os-Linux/util/mk_user_constants.c b/arch/um/os-Linux/util/mk_user_constants.c
deleted file mode 100644
index 4838f30eecf0..000000000000
--- a/arch/um/os-Linux/util/mk_user_constants.c
+++ /dev/null
@@ -1,23 +0,0 @@
1#include <stdio.h>
2#include <user-offsets.h>
3
4int main(int argc, char **argv)
5{
6 printf("/*\n");
7 printf(" * Generated by mk_user_constants\n");
8 printf(" */\n");
9 printf("\n");
10 printf("#ifndef __UM_USER_CONSTANTS_H\n");
11 printf("#define __UM_USER_CONSTANTS_H\n");
12 printf("\n");
13 /* I'd like to use FRAME_SIZE from ptrace.h here, but that's wrong on
14 * x86_64 (216 vs 168 bytes). user_regs_struct is the correct size on
15 * both x86_64 and i386.
16 */
17 printf("#define UM_FRAME_SIZE %d\n", __UM_FRAME_SIZE);
18
19 printf("\n");
20 printf("#endif\n");
21
22 return(0);
23}
diff --git a/arch/um/sys-i386/Makefile b/arch/um/sys-i386/Makefile
index 4ca2a229da49..6dfeb70f6957 100644
--- a/arch/um/sys-i386/Makefile
+++ b/arch/um/sys-i386/Makefile
@@ -18,6 +18,4 @@ module.c-dir = kernel
18 18
19$(obj)/stub_segv.o : _c_flags = $(call unprofile,$(CFLAGS)) 19$(obj)/stub_segv.o : _c_flags = $(call unprofile,$(CFLAGS))
20 20
21subdir- := util
22
23include arch/um/scripts/Makefile.unmap 21include arch/um/scripts/Makefile.unmap
diff --git a/arch/um/sys-i386/kernel-offsets.c b/arch/um/sys-i386/kernel-offsets.c
index a1070af2bcd8..35db85057506 100644
--- a/arch/um/sys-i386/kernel-offsets.c
+++ b/arch/um/sys-i386/kernel-offsets.c
@@ -18,9 +18,9 @@
18 18
19void foo(void) 19void foo(void)
20{ 20{
21 OFFSET(TASK_DEBUGREGS, task_struct, thread.arch.debugregs); 21 OFFSET(HOST_TASK_DEBUGREGS, task_struct, thread.arch.debugregs);
22#ifdef CONFIG_MODE_TT 22#ifdef CONFIG_MODE_TT
23 OFFSET(TASK_EXTERN_PID, task_struct, thread.mode.tt.extern_pid); 23 OFFSET(HOST_TASK_EXTERN_PID, task_struct, thread.mode.tt.extern_pid);
24#endif 24#endif
25#include <common-offsets.h> 25#include <common-offsets.h>
26} 26}
diff --git a/arch/um/sys-i386/user-offsets.c b/arch/um/sys-i386/user-offsets.c
index 3ceaabceb3d7..677fc26a9bbe 100644
--- a/arch/um/sys-i386/user-offsets.c
+++ b/arch/um/sys-i386/user-offsets.c
@@ -7,47 +7,48 @@
7#define DEFINE(sym, val) \ 7#define DEFINE(sym, val) \
8 asm volatile("\n->" #sym " %0 " #val : : "i" (val)) 8 asm volatile("\n->" #sym " %0 " #val : : "i" (val))
9 9
10#define DEFINE_LONGS(sym, val) \
11 asm volatile("\n->" #sym " %0 " #val : : "i" (val/sizeof(unsigned long)))
12
10#define OFFSET(sym, str, mem) \ 13#define OFFSET(sym, str, mem) \
11 DEFINE(sym, offsetof(struct str, mem)); 14 DEFINE(sym, offsetof(struct str, mem));
12 15
13void foo(void) 16void foo(void)
14{ 17{
15 OFFSET(SC_IP, sigcontext, eip); 18 OFFSET(HOST_SC_IP, sigcontext, eip);
16 OFFSET(SC_SP, sigcontext, esp); 19 OFFSET(HOST_SC_SP, sigcontext, esp);
17 OFFSET(SC_FS, sigcontext, fs); 20 OFFSET(HOST_SC_FS, sigcontext, fs);
18 OFFSET(SC_GS, sigcontext, gs); 21 OFFSET(HOST_SC_GS, sigcontext, gs);
19 OFFSET(SC_DS, sigcontext, ds); 22 OFFSET(HOST_SC_DS, sigcontext, ds);
20 OFFSET(SC_ES, sigcontext, es); 23 OFFSET(HOST_SC_ES, sigcontext, es);
21 OFFSET(SC_SS, sigcontext, ss); 24 OFFSET(HOST_SC_SS, sigcontext, ss);
22 OFFSET(SC_CS, sigcontext, cs); 25 OFFSET(HOST_SC_CS, sigcontext, cs);
23 OFFSET(SC_EFLAGS, sigcontext, eflags); 26 OFFSET(HOST_SC_EFLAGS, sigcontext, eflags);
24 OFFSET(SC_EAX, sigcontext, eax); 27 OFFSET(HOST_SC_EAX, sigcontext, eax);
25 OFFSET(SC_EBX, sigcontext, ebx); 28 OFFSET(HOST_SC_EBX, sigcontext, ebx);
26 OFFSET(SC_ECX, sigcontext, ecx); 29 OFFSET(HOST_SC_ECX, sigcontext, ecx);
27 OFFSET(SC_EDX, sigcontext, edx); 30 OFFSET(HOST_SC_EDX, sigcontext, edx);
28 OFFSET(SC_EDI, sigcontext, edi); 31 OFFSET(HOST_SC_EDI, sigcontext, edi);
29 OFFSET(SC_ESI, sigcontext, esi); 32 OFFSET(HOST_SC_ESI, sigcontext, esi);
30 OFFSET(SC_EBP, sigcontext, ebp); 33 OFFSET(HOST_SC_EBP, sigcontext, ebp);
31 OFFSET(SC_TRAPNO, sigcontext, trapno); 34 OFFSET(HOST_SC_TRAPNO, sigcontext, trapno);
32 OFFSET(SC_ERR, sigcontext, err); 35 OFFSET(HOST_SC_ERR, sigcontext, err);
33 OFFSET(SC_CR2, sigcontext, cr2); 36 OFFSET(HOST_SC_CR2, sigcontext, cr2);
34 OFFSET(SC_FPSTATE, sigcontext, fpstate); 37 OFFSET(HOST_SC_FPSTATE, sigcontext, fpstate);
35 OFFSET(SC_SIGMASK, sigcontext, oldmask); 38 OFFSET(HOST_SC_SIGMASK, sigcontext, oldmask);
36 OFFSET(SC_FP_CW, _fpstate, cw); 39 OFFSET(HOST_SC_FP_CW, _fpstate, cw);
37 OFFSET(SC_FP_SW, _fpstate, sw); 40 OFFSET(HOST_SC_FP_SW, _fpstate, sw);
38 OFFSET(SC_FP_TAG, _fpstate, tag); 41 OFFSET(HOST_SC_FP_TAG, _fpstate, tag);
39 OFFSET(SC_FP_IPOFF, _fpstate, ipoff); 42 OFFSET(HOST_SC_FP_IPOFF, _fpstate, ipoff);
40 OFFSET(SC_FP_CSSEL, _fpstate, cssel); 43 OFFSET(HOST_SC_FP_CSSEL, _fpstate, cssel);
41 OFFSET(SC_FP_DATAOFF, _fpstate, dataoff); 44 OFFSET(HOST_SC_FP_DATAOFF, _fpstate, dataoff);
42 OFFSET(SC_FP_DATASEL, _fpstate, datasel); 45 OFFSET(HOST_SC_FP_DATASEL, _fpstate, datasel);
43 OFFSET(SC_FP_ST, _fpstate, _st); 46 OFFSET(HOST_SC_FP_ST, _fpstate, _st);
44 OFFSET(SC_FXSR_ENV, _fpstate, _fxsr_env); 47 OFFSET(HOST_SC_FXSR_ENV, _fpstate, _fxsr_env);
45 48
46 DEFINE(HOST_FRAME_SIZE, FRAME_SIZE); 49 DEFINE_LONGS(HOST_FRAME_SIZE, FRAME_SIZE);
47 DEFINE(HOST_FP_SIZE, 50 DEFINE_LONGS(HOST_FP_SIZE, sizeof(struct user_i387_struct));
48 sizeof(struct user_i387_struct) / sizeof(unsigned long)); 51 DEFINE_LONGS(HOST_XFP_SIZE, sizeof(struct user_fxsr_struct));
49 DEFINE(HOST_XFP_SIZE,
50 sizeof(struct user_fxsr_struct) / sizeof(unsigned long));
51 52
52 DEFINE(HOST_IP, EIP); 53 DEFINE(HOST_IP, EIP);
53 DEFINE(HOST_SP, UESP); 54 DEFINE(HOST_SP, UESP);
@@ -65,5 +66,5 @@ void foo(void)
65 DEFINE(HOST_FS, FS); 66 DEFINE(HOST_FS, FS);
66 DEFINE(HOST_ES, ES); 67 DEFINE(HOST_ES, ES);
67 DEFINE(HOST_GS, GS); 68 DEFINE(HOST_GS, GS);
68 DEFINE(__UM_FRAME_SIZE, sizeof(struct user_regs_struct)); 69 DEFINE(UM_FRAME_SIZE, sizeof(struct user_regs_struct));
69} 70}
diff --git a/arch/um/sys-i386/util/Makefile b/arch/um/sys-i386/util/Makefile
deleted file mode 100644
index bf61afd0b045..000000000000
--- a/arch/um/sys-i386/util/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
1hostprogs-y := mk_sc mk_thread
2always := $(hostprogs-y)
3
4HOSTCFLAGS_mk_sc.o := -I$(objtree)/arch/um
5HOSTCFLAGS_mk_thread.o := -I$(objtree)/arch/um
diff --git a/arch/um/sys-i386/util/mk_sc.c b/arch/um/sys-i386/util/mk_sc.c
deleted file mode 100644
index 04c0d73433aa..000000000000
--- a/arch/um/sys-i386/util/mk_sc.c
+++ /dev/null
@@ -1,51 +0,0 @@
1#include <stdio.h>
2#include <user-offsets.h>
3
4#define SC_OFFSET(name, field) \
5 printf("#define " #name "(sc) *((unsigned long *) &(((char *) (sc))[%d]))\n",\
6 name)
7
8#define SC_FP_OFFSET(name, field) \
9 printf("#define " #name \
10 "(sc) *((unsigned long *) &(((char *) (SC_FPSTATE(sc)))[%d]))\n",\
11 name)
12
13#define SC_FP_OFFSET_PTR(name, field, type) \
14 printf("#define " #name \
15 "(sc) ((" type " *) &(((char *) (SC_FPSTATE(sc)))[%d]))\n",\
16 name)
17
18int main(int argc, char **argv)
19{
20 SC_OFFSET(SC_IP, eip);
21 SC_OFFSET(SC_SP, esp);
22 SC_OFFSET(SC_FS, fs);
23 SC_OFFSET(SC_GS, gs);
24 SC_OFFSET(SC_DS, ds);
25 SC_OFFSET(SC_ES, es);
26 SC_OFFSET(SC_SS, ss);
27 SC_OFFSET(SC_CS, cs);
28 SC_OFFSET(SC_EFLAGS, eflags);
29 SC_OFFSET(SC_EAX, eax);
30 SC_OFFSET(SC_EBX, ebx);
31 SC_OFFSET(SC_ECX, ecx);
32 SC_OFFSET(SC_EDX, edx);
33 SC_OFFSET(SC_EDI, edi);
34 SC_OFFSET(SC_ESI, esi);
35 SC_OFFSET(SC_EBP, ebp);
36 SC_OFFSET(SC_TRAPNO, trapno);
37 SC_OFFSET(SC_ERR, err);
38 SC_OFFSET(SC_CR2, cr2);
39 SC_OFFSET(SC_FPSTATE, fpstate);
40 SC_OFFSET(SC_SIGMASK, oldmask);
41 SC_FP_OFFSET(SC_FP_CW, cw);
42 SC_FP_OFFSET(SC_FP_SW, sw);
43 SC_FP_OFFSET(SC_FP_TAG, tag);
44 SC_FP_OFFSET(SC_FP_IPOFF, ipoff);
45 SC_FP_OFFSET(SC_FP_CSSEL, cssel);
46 SC_FP_OFFSET(SC_FP_DATAOFF, dataoff);
47 SC_FP_OFFSET(SC_FP_DATASEL, datasel);
48 SC_FP_OFFSET_PTR(SC_FP_ST, _st, "struct _fpstate");
49 SC_FP_OFFSET_PTR(SC_FXSR_ENV, _fxsr_env, "void");
50 return(0);
51}
diff --git a/arch/um/sys-i386/util/mk_thread.c b/arch/um/sys-i386/util/mk_thread.c
deleted file mode 100644
index 7470d0dda67e..000000000000
--- a/arch/um/sys-i386/util/mk_thread.c
+++ /dev/null
@@ -1,22 +0,0 @@
1#include <stdio.h>
2#include <kernel-offsets.h>
3
4int main(int argc, char **argv)
5{
6 printf("/*\n");
7 printf(" * Generated by mk_thread\n");
8 printf(" */\n");
9 printf("\n");
10 printf("#ifndef __UM_THREAD_H\n");
11 printf("#define __UM_THREAD_H\n");
12 printf("\n");
13 printf("#define TASK_DEBUGREGS(task) ((unsigned long *) "
14 "&(((char *) (task))[%d]))\n", TASK_DEBUGREGS);
15#ifdef TASK_EXTERN_PID
16 printf("#define TASK_EXTERN_PID(task) *((int *) &(((char *) (task))[%d]))\n",
17 TASK_EXTERN_PID);
18#endif
19 printf("\n");
20 printf("#endif\n");
21 return(0);
22}
diff --git a/arch/um/sys-x86_64/Makefile b/arch/um/sys-x86_64/Makefile
index f0ab574d1e95..06c3633457a2 100644
--- a/arch/um/sys-x86_64/Makefile
+++ b/arch/um/sys-x86_64/Makefile
@@ -29,6 +29,4 @@ module.c-dir = kernel
29 29
30$(obj)/stub_segv.o: _c_flags = $(call unprofile,$(CFLAGS)) 30$(obj)/stub_segv.o: _c_flags = $(call unprofile,$(CFLAGS))
31 31
32subdir- := util
33
34include arch/um/scripts/Makefile.unmap 32include arch/um/scripts/Makefile.unmap
diff --git a/arch/um/sys-x86_64/kernel-offsets.c b/arch/um/sys-x86_64/kernel-offsets.c
index 998541eade41..bfcb104b846e 100644
--- a/arch/um/sys-x86_64/kernel-offsets.c
+++ b/arch/um/sys-x86_64/kernel-offsets.c
@@ -19,7 +19,7 @@
19void foo(void) 19void foo(void)
20{ 20{
21#ifdef CONFIG_MODE_TT 21#ifdef CONFIG_MODE_TT
22 OFFSET(TASK_EXTERN_PID, task_struct, thread.mode.tt.extern_pid); 22 OFFSET(HOST_TASK_EXTERN_PID, task_struct, thread.mode.tt.extern_pid);
23#endif 23#endif
24#include <common-offsets.h> 24#include <common-offsets.h>
25} 25}
diff --git a/arch/um/sys-x86_64/user-offsets.c b/arch/um/sys-x86_64/user-offsets.c
index 513d17ceafd4..5a585bfbb8c2 100644
--- a/arch/um/sys-x86_64/user-offsets.c
+++ b/arch/um/sys-x86_64/user-offsets.c
@@ -16,71 +16,76 @@ typedef __u32 u32;
16#define DEFINE(sym, val) \ 16#define DEFINE(sym, val) \
17 asm volatile("\n->" #sym " %0 " #val : : "i" (val)) 17 asm volatile("\n->" #sym " %0 " #val : : "i" (val))
18 18
19#define DEFINE_LONGS(sym, val) \
20 asm volatile("\n->" #sym " %0 " #val : : "i" (val/sizeof(unsigned long)))
21
19#define OFFSET(sym, str, mem) \ 22#define OFFSET(sym, str, mem) \
20 DEFINE(sym, offsetof(struct str, mem)); 23 DEFINE(sym, offsetof(struct str, mem));
21 24
22void foo(void) 25void foo(void)
23{ 26{
24 OFFSET(SC_RBX, sigcontext, rbx); 27 OFFSET(HOST_SC_RBX, sigcontext, rbx);
25 OFFSET(SC_RCX, sigcontext, rcx); 28 OFFSET(HOST_SC_RCX, sigcontext, rcx);
26 OFFSET(SC_RDX, sigcontext, rdx); 29 OFFSET(HOST_SC_RDX, sigcontext, rdx);
27 OFFSET(SC_RSI, sigcontext, rsi); 30 OFFSET(HOST_SC_RSI, sigcontext, rsi);
28 OFFSET(SC_RDI, sigcontext, rdi); 31 OFFSET(HOST_SC_RDI, sigcontext, rdi);
29 OFFSET(SC_RBP, sigcontext, rbp); 32 OFFSET(HOST_SC_RBP, sigcontext, rbp);
30 OFFSET(SC_RAX, sigcontext, rax); 33 OFFSET(HOST_SC_RAX, sigcontext, rax);
31 OFFSET(SC_R8, sigcontext, r8); 34 OFFSET(HOST_SC_R8, sigcontext, r8);
32 OFFSET(SC_R9, sigcontext, r9); 35 OFFSET(HOST_SC_R9, sigcontext, r9);
33 OFFSET(SC_R10, sigcontext, r10); 36 OFFSET(HOST_SC_R10, sigcontext, r10);
34 OFFSET(SC_R11, sigcontext, r11); 37 OFFSET(HOST_SC_R11, sigcontext, r11);
35 OFFSET(SC_R12, sigcontext, r12); 38 OFFSET(HOST_SC_R12, sigcontext, r12);
36 OFFSET(SC_R13, sigcontext, r13); 39 OFFSET(HOST_SC_R13, sigcontext, r13);
37 OFFSET(SC_R14, sigcontext, r14); 40 OFFSET(HOST_SC_R14, sigcontext, r14);
38 OFFSET(SC_R15, sigcontext, r15); 41 OFFSET(HOST_SC_R15, sigcontext, r15);
39 OFFSET(SC_IP, sigcontext, rip); 42 OFFSET(HOST_SC_IP, sigcontext, rip);
40 OFFSET(SC_SP, sigcontext, rsp); 43 OFFSET(HOST_SC_SP, sigcontext, rsp);
41 OFFSET(SC_CR2, sigcontext, cr2); 44 OFFSET(HOST_SC_CR2, sigcontext, cr2);
42 OFFSET(SC_ERR, sigcontext, err); 45 OFFSET(HOST_SC_ERR, sigcontext, err);
43 OFFSET(SC_TRAPNO, sigcontext, trapno); 46 OFFSET(HOST_SC_TRAPNO, sigcontext, trapno);
44 OFFSET(SC_CS, sigcontext, cs); 47 OFFSET(HOST_SC_CS, sigcontext, cs);
45 OFFSET(SC_FS, sigcontext, fs); 48 OFFSET(HOST_SC_FS, sigcontext, fs);
46 OFFSET(SC_GS, sigcontext, gs); 49 OFFSET(HOST_SC_GS, sigcontext, gs);
47 OFFSET(SC_EFLAGS, sigcontext, eflags); 50 OFFSET(HOST_SC_EFLAGS, sigcontext, eflags);
48 OFFSET(SC_SIGMASK, sigcontext, oldmask); 51 OFFSET(HOST_SC_SIGMASK, sigcontext, oldmask);
49#if 0 52#if 0
50 OFFSET(SC_ORIG_RAX, sigcontext, orig_rax); 53 OFFSET(HOST_SC_ORIG_RAX, sigcontext, orig_rax);
51 OFFSET(SC_DS, sigcontext, ds); 54 OFFSET(HOST_SC_DS, sigcontext, ds);
52 OFFSET(SC_ES, sigcontext, es); 55 OFFSET(HOST_SC_ES, sigcontext, es);
53 OFFSET(SC_SS, sigcontext, ss); 56 OFFSET(HOST_SC_SS, sigcontext, ss);
54#endif 57#endif
55 58
56 DEFINE(HOST_FRAME_SIZE, FRAME_SIZE); 59 DEFINE_LONGS(HOST_FRAME_SIZE, FRAME_SIZE);
57 DEFINE(HOST_RBX, RBX); 60 DEFINE(HOST_FP_SIZE, 0);
58 DEFINE(HOST_RCX, RCX); 61 DEFINE(HOST_XFP_SIZE, 0);
59 DEFINE(HOST_RDI, RDI); 62 DEFINE_LONGS(HOST_RBX, RBX);
60 DEFINE(HOST_RSI, RSI); 63 DEFINE_LONGS(HOST_RCX, RCX);
61 DEFINE(HOST_RDX, RDX); 64 DEFINE_LONGS(HOST_RDI, RDI);
62 DEFINE(HOST_RBP, RBP); 65 DEFINE_LONGS(HOST_RSI, RSI);
63 DEFINE(HOST_RAX, RAX); 66 DEFINE_LONGS(HOST_RDX, RDX);
64 DEFINE(HOST_R8, R8); 67 DEFINE_LONGS(HOST_RBP, RBP);
65 DEFINE(HOST_R9, R9); 68 DEFINE_LONGS(HOST_RAX, RAX);
66 DEFINE(HOST_R10, R10); 69 DEFINE_LONGS(HOST_R8, R8);
67 DEFINE(HOST_R11, R11); 70 DEFINE_LONGS(HOST_R9, R9);
68 DEFINE(HOST_R12, R12); 71 DEFINE_LONGS(HOST_R10, R10);
69 DEFINE(HOST_R13, R13); 72 DEFINE_LONGS(HOST_R11, R11);
70 DEFINE(HOST_R14, R14); 73 DEFINE_LONGS(HOST_R12, R12);
71 DEFINE(HOST_R15, R15); 74 DEFINE_LONGS(HOST_R13, R13);
72 DEFINE(HOST_ORIG_RAX, ORIG_RAX); 75 DEFINE_LONGS(HOST_R14, R14);
73 DEFINE(HOST_CS, CS); 76 DEFINE_LONGS(HOST_R15, R15);
74 DEFINE(HOST_SS, SS); 77 DEFINE_LONGS(HOST_ORIG_RAX, ORIG_RAX);
75 DEFINE(HOST_EFLAGS, EFLAGS); 78 DEFINE_LONGS(HOST_CS, CS);
79 DEFINE_LONGS(HOST_SS, SS);
80 DEFINE_LONGS(HOST_EFLAGS, EFLAGS);
76#if 0 81#if 0
77 DEFINE(HOST_FS, FS); 82 DEFINE_LONGS(HOST_FS, FS);
78 DEFINE(HOST_GS, GS); 83 DEFINE_LONGS(HOST_GS, GS);
79 DEFINE(HOST_DS, DS); 84 DEFINE_LONGS(HOST_DS, DS);
80 DEFINE(HOST_ES, ES); 85 DEFINE_LONGS(HOST_ES, ES);
81#endif 86#endif
82 87
83 DEFINE(HOST_IP, RIP); 88 DEFINE_LONGS(HOST_IP, RIP);
84 DEFINE(HOST_SP, RSP); 89 DEFINE_LONGS(HOST_SP, RSP);
85 DEFINE(__UM_FRAME_SIZE, sizeof(struct user_regs_struct)); 90 DEFINE(UM_FRAME_SIZE, sizeof(struct user_regs_struct));
86} 91}
diff --git a/arch/um/sys-x86_64/util/Makefile b/arch/um/sys-x86_64/util/Makefile
deleted file mode 100644
index 75b052cfc206..000000000000
--- a/arch/um/sys-x86_64/util/Makefile
+++ /dev/null
@@ -1,8 +0,0 @@
1# Copyright 2003 - 2004 Pathscale, Inc
2# Released under the GPL
3
4hostprogs-y := mk_sc mk_thread
5always := $(hostprogs-y)
6
7HOSTCFLAGS_mk_sc.o := -I$(objtree)/arch/um
8HOSTCFLAGS_mk_thread.o := -I$(objtree)/arch/um
diff --git a/arch/um/sys-x86_64/util/mk_sc.c b/arch/um/sys-x86_64/util/mk_sc.c
deleted file mode 100644
index 7619bc377c1f..000000000000
--- a/arch/um/sys-x86_64/util/mk_sc.c
+++ /dev/null
@@ -1,47 +0,0 @@
1/* Copyright (C) 2003 - 2004 PathScale, Inc
2 * Released under the GPL
3 */
4
5#include <stdio.h>
6#include <user-offsets.h>
7
8#define SC_OFFSET(name) \
9 printf("#define " #name \
10 "(sc) *((unsigned long *) &(((char *) (sc))[%d]))\n",\
11 name)
12
13int main(int argc, char **argv)
14{
15 SC_OFFSET(SC_RBX);
16 SC_OFFSET(SC_RCX);
17 SC_OFFSET(SC_RDX);
18 SC_OFFSET(SC_RSI);
19 SC_OFFSET(SC_RDI);
20 SC_OFFSET(SC_RBP);
21 SC_OFFSET(SC_RAX);
22 SC_OFFSET(SC_R8);
23 SC_OFFSET(SC_R9);
24 SC_OFFSET(SC_R10);
25 SC_OFFSET(SC_R11);
26 SC_OFFSET(SC_R12);
27 SC_OFFSET(SC_R13);
28 SC_OFFSET(SC_R14);
29 SC_OFFSET(SC_R15);
30 SC_OFFSET(SC_IP);
31 SC_OFFSET(SC_SP);
32 SC_OFFSET(SC_CR2);
33 SC_OFFSET(SC_ERR);
34 SC_OFFSET(SC_TRAPNO);
35 SC_OFFSET(SC_CS);
36 SC_OFFSET(SC_FS);
37 SC_OFFSET(SC_GS);
38 SC_OFFSET(SC_EFLAGS);
39 SC_OFFSET(SC_SIGMASK);
40#if 0
41 SC_OFFSET(SC_ORIG_RAX);
42 SC_OFFSET(SC_DS);
43 SC_OFFSET(SC_ES);
44 SC_OFFSET(SC_SS);
45#endif
46 return(0);
47}
diff --git a/arch/um/sys-x86_64/util/mk_thread.c b/arch/um/sys-x86_64/util/mk_thread.c
deleted file mode 100644
index 15517396e9cf..000000000000
--- a/arch/um/sys-x86_64/util/mk_thread.c
+++ /dev/null
@@ -1,20 +0,0 @@
1#include <stdio.h>
2#include <kernel-offsets.h>
3
4int main(int argc, char **argv)
5{
6 printf("/*\n");
7 printf(" * Generated by mk_thread\n");
8 printf(" */\n");
9 printf("\n");
10 printf("#ifndef __UM_THREAD_H\n");
11 printf("#define __UM_THREAD_H\n");
12 printf("\n");
13#ifdef TASK_EXTERN_PID
14 printf("#define TASK_EXTERN_PID(task) *((int *) &(((char *) (task))[%d]))\n",
15 TASK_EXTERN_PID);
16#endif
17 printf("\n");
18 printf("#endif\n");
19 return(0);
20}
diff --git a/arch/um/util/Makefile b/arch/um/util/Makefile
deleted file mode 100644
index 4c7551c28033..000000000000
--- a/arch/um/util/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
1hostprogs-y := mk_task mk_constants
2always := $(hostprogs-y)
3
4HOSTCFLAGS_mk_task.o := -I$(objtree)/arch/um
5HOSTCFLAGS_mk_constants.o := -I$(objtree)/arch/um
diff --git a/arch/um/util/mk_constants.c b/arch/um/util/mk_constants.c
deleted file mode 100644
index ab217becc36a..000000000000
--- a/arch/um/util/mk_constants.c
+++ /dev/null
@@ -1,32 +0,0 @@
1#include <stdio.h>
2#include <kernel-offsets.h>
3
4#define SHOW_INT(sym) printf("#define %s %d\n", #sym, sym)
5#define SHOW_STR(sym) printf("#define %s %s\n", #sym, sym)
6
7int main(int argc, char **argv)
8{
9 printf("/*\n");
10 printf(" * Generated by mk_constants\n");
11 printf(" */\n");
12 printf("\n");
13 printf("#ifndef __UM_CONSTANTS_H\n");
14 printf("#define __UM_CONSTANTS_H\n");
15 printf("\n");
16
17 SHOW_INT(UM_KERN_PAGE_SIZE);
18
19 SHOW_STR(UM_KERN_EMERG);
20 SHOW_STR(UM_KERN_ALERT);
21 SHOW_STR(UM_KERN_CRIT);
22 SHOW_STR(UM_KERN_ERR);
23 SHOW_STR(UM_KERN_WARNING);
24 SHOW_STR(UM_KERN_NOTICE);
25 SHOW_STR(UM_KERN_INFO);
26 SHOW_STR(UM_KERN_DEBUG);
27
28 SHOW_INT(UM_NSEC_PER_SEC);
29 printf("\n");
30 printf("#endif\n");
31 return(0);
32}
diff --git a/arch/um/util/mk_task.c b/arch/um/util/mk_task.c
deleted file mode 100644
index 36c9606505e2..000000000000
--- a/arch/um/util/mk_task.c
+++ /dev/null
@@ -1,30 +0,0 @@
1#include <stdio.h>
2#include <kernel-offsets.h>
3
4void print_ptr(char *name, char *type, int offset)
5{
6 printf("#define %s(task) ((%s *) &(((char *) (task))[%d]))\n", name, type,
7 offset);
8}
9
10void print(char *name, char *type, int offset)
11{
12 printf("#define %s(task) *((%s *) &(((char *) (task))[%d]))\n", name, type,
13 offset);
14}
15
16int main(int argc, char **argv)
17{
18 printf("/*\n");
19 printf(" * Generated by mk_task\n");
20 printf(" */\n");
21 printf("\n");
22 printf("#ifndef __TASK_H\n");
23 printf("#define __TASK_H\n");
24 printf("\n");
25 print_ptr("TASK_REGS", "union uml_pt_regs", TASK_REGS);
26 print("TASK_PID", "int", TASK_PID);
27 printf("\n");
28 printf("#endif\n");
29 return(0);
30}
diff --git a/arch/x86_64/kernel/kprobes.c b/arch/x86_64/kernel/kprobes.c
index df08c43276a0..76a28b007be9 100644
--- a/arch/x86_64/kernel/kprobes.c
+++ b/arch/x86_64/kernel/kprobes.c
@@ -77,9 +77,9 @@ static inline int is_IF_modifier(kprobe_opcode_t *insn)
77int __kprobes arch_prepare_kprobe(struct kprobe *p) 77int __kprobes arch_prepare_kprobe(struct kprobe *p)
78{ 78{
79 /* insn: must be on special executable page on x86_64. */ 79 /* insn: must be on special executable page on x86_64. */
80 up(&kprobe_mutex);
81 p->ainsn.insn = get_insn_slot();
82 down(&kprobe_mutex); 80 down(&kprobe_mutex);
81 p->ainsn.insn = get_insn_slot();
82 up(&kprobe_mutex);
83 if (!p->ainsn.insn) { 83 if (!p->ainsn.insn) {
84 return -ENOMEM; 84 return -ENOMEM;
85 } 85 }
@@ -231,9 +231,9 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p)
231 231
232void __kprobes arch_remove_kprobe(struct kprobe *p) 232void __kprobes arch_remove_kprobe(struct kprobe *p)
233{ 233{
234 up(&kprobe_mutex);
235 free_insn_slot(p->ainsn.insn);
236 down(&kprobe_mutex); 234 down(&kprobe_mutex);
235 free_insn_slot(p->ainsn.insn);
236 up(&kprobe_mutex);
237} 237}
238 238
239static inline void save_previous_kprobe(void) 239static inline void save_previous_kprobe(void)
diff --git a/arch/x86_64/kernel/mce.c b/arch/x86_64/kernel/mce.c
index 08203b07f4bd..69541db5ff2c 100644
--- a/arch/x86_64/kernel/mce.c
+++ b/arch/x86_64/kernel/mce.c
@@ -54,9 +54,12 @@ void mce_log(struct mce *mce)
54{ 54{
55 unsigned next, entry; 55 unsigned next, entry;
56 mce->finished = 0; 56 mce->finished = 0;
57 smp_wmb(); 57 wmb();
58 for (;;) { 58 for (;;) {
59 entry = rcu_dereference(mcelog.next); 59 entry = rcu_dereference(mcelog.next);
60 /* The rmb forces the compiler to reload next in each
61 iteration */
62 rmb();
60 for (;;) { 63 for (;;) {
61 /* When the buffer fills up discard new entries. Assume 64 /* When the buffer fills up discard new entries. Assume
62 that the earlier errors are the more interesting. */ 65 that the earlier errors are the more interesting. */
@@ -69,6 +72,7 @@ void mce_log(struct mce *mce)
69 entry++; 72 entry++;
70 continue; 73 continue;
71 } 74 }
75 break;
72 } 76 }
73 smp_rmb(); 77 smp_rmb();
74 next = entry + 1; 78 next = entry + 1;
@@ -76,9 +80,9 @@ void mce_log(struct mce *mce)
76 break; 80 break;
77 } 81 }
78 memcpy(mcelog.entry + entry, mce, sizeof(struct mce)); 82 memcpy(mcelog.entry + entry, mce, sizeof(struct mce));
79 smp_wmb(); 83 wmb();
80 mcelog.entry[entry].finished = 1; 84 mcelog.entry[entry].finished = 1;
81 smp_wmb(); 85 wmb();
82 86
83 if (!test_and_set_bit(0, &console_logged)) 87 if (!test_and_set_bit(0, &console_logged))
84 notify_user = 1; 88 notify_user = 1;
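The mce_log() hunk keeps the "fill the record, then publish it" ordering: the entry is copied first, a write barrier is issued, and only then is the finished flag set, so a reader that observes finished == 1 also sees the completed record; the added rmb() likewise forces the reader to refetch mcelog.next each pass. A kernel-style sketch of the publish side, assuming 2.6-era barrier macros; the struct and function names are hypothetical:

	#include <asm/system.h>		/* wmb() on 2.6-era x86/x86-64 */

	struct record {
		int payload;
		int finished;
	};

	static void publish(struct record *r, int value)
	{
		r->payload  = value;	/* 1. fill in the data */
		wmb();			/* 2. order the data before the flag */
		r->finished = 1;	/* 3. publish: readers now see a complete record */
	}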
diff --git a/arch/x86_64/kernel/setup.c b/arch/x86_64/kernel/setup.c
index 238f73e1a834..cb28df14ff6f 100644
--- a/arch/x86_64/kernel/setup.c
+++ b/arch/x86_64/kernel/setup.c
@@ -831,8 +831,6 @@ static void __init amd_detect_cmp(struct cpuinfo_x86 *c)
831#endif 831#endif
832} 832}
833 833
834#define HWCR 0xc0010015
835
836static int __init init_amd(struct cpuinfo_x86 *c) 834static int __init init_amd(struct cpuinfo_x86 *c)
837{ 835{
838 int r; 836 int r;
@@ -841,14 +839,18 @@ static int __init init_amd(struct cpuinfo_x86 *c)
841#ifdef CONFIG_SMP 839#ifdef CONFIG_SMP
842 unsigned long value; 840 unsigned long value;
843 841
844 // Disable TLB flush filter by setting HWCR.FFDIS: 842 /*
845 // bit 6 of msr C001_0015 843 * Disable TLB flush filter by setting HWCR.FFDIS on K8
846 // 844 * bit 6 of msr C001_0015
847 // Errata 63 for SH-B3 steppings 845 *
848 // Errata 122 for all(?) steppings 846 * Errata 63 for SH-B3 steppings
849 rdmsrl(HWCR, value); 847 * Errata 122 for all steppings (F+ have it disabled by default)
850 value |= 1 << 6; 848 */
851 wrmsrl(HWCR, value); 849 if (c->x86 == 15) {
850 rdmsrl(MSR_K8_HWCR, value);
851 value |= 1 << 6;
852 wrmsrl(MSR_K8_HWCR, value);
853 }
852#endif 854#endif
853 855
854 /* Bit 31 in normal CPUID used for nonstandard 3DNow ID; 856 /* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
@@ -965,13 +967,12 @@ static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c)
965static void srat_detect_node(void) 967static void srat_detect_node(void)
966{ 968{
967#ifdef CONFIG_NUMA 969#ifdef CONFIG_NUMA
968 unsigned apicid, node; 970 unsigned node;
969 int cpu = smp_processor_id(); 971 int cpu = smp_processor_id();
970 972
971 /* Don't do the funky fallback heuristics the AMD version employs 973 /* Don't do the funky fallback heuristics the AMD version employs
972 for now. */ 974 for now. */
973 apicid = phys_proc_id[cpu]; 975 node = apicid_to_node[hard_smp_processor_id()];
974 node = apicid_to_node[apicid];
975 if (node == NUMA_NO_NODE) 976 if (node == NUMA_NO_NODE)
976 node = 0; 977 node = 0;
977 cpu_to_node[cpu] = node; 978 cpu_to_node[cpu] = node;
diff --git a/arch/x86_64/kernel/time.c b/arch/x86_64/kernel/time.c
index 2373cb8b8625..703acde2a1a5 100644
--- a/arch/x86_64/kernel/time.c
+++ b/arch/x86_64/kernel/time.c
@@ -959,9 +959,6 @@ static __init int unsynchronized_tsc(void)
959 are handled in the OEM check above. */ 959 are handled in the OEM check above. */
960 if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) 960 if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
961 return 0; 961 return 0;
962 /* All in a single socket - should be synchronized */
963 if (cpus_weight(cpu_core_map[0]) == num_online_cpus())
964 return 0;
965#endif 962#endif
966 /* Assume multi socket systems are not synchronized */ 963 /* Assume multi socket systems are not synchronized */
967 return num_online_cpus() > 1; 964 return num_online_cpus() > 1;
diff --git a/arch/x86_64/mm/numa.c b/arch/x86_64/mm/numa.c
index 80a49d9bd8a7..214803821001 100644
--- a/arch/x86_64/mm/numa.c
+++ b/arch/x86_64/mm/numa.c
@@ -167,18 +167,16 @@ void __init numa_init_array(void)
167 mapping. To avoid this fill in the mapping for all possible 167 mapping. To avoid this fill in the mapping for all possible
168 CPUs, as the number of CPUs is not known yet. 168 CPUs, as the number of CPUs is not known yet.
169 We round robin the existing nodes. */ 169 We round robin the existing nodes. */
170 rr = 0; 170 rr = first_node(node_online_map);
171 for (i = 0; i < NR_CPUS; i++) { 171 for (i = 0; i < NR_CPUS; i++) {
172 if (cpu_to_node[i] != NUMA_NO_NODE) 172 if (cpu_to_node[i] != NUMA_NO_NODE)
173 continue; 173 continue;
174 cpu_to_node[i] = rr;
174 rr = next_node(rr, node_online_map); 175 rr = next_node(rr, node_online_map);
175 if (rr == MAX_NUMNODES) 176 if (rr == MAX_NUMNODES)
176 rr = first_node(node_online_map); 177 rr = first_node(node_online_map);
177 cpu_to_node[i] = rr;
178 rr++;
179 } 178 }
180 179
181 set_bit(0, &node_to_cpumask[cpu_to_node(0)]);
182} 180}
183 181
184#ifdef CONFIG_NUMA_EMU 182#ifdef CONFIG_NUMA_EMU
@@ -266,9 +264,7 @@ void __init numa_initmem_init(unsigned long start_pfn, unsigned long end_pfn)
266 264
267__cpuinit void numa_add_cpu(int cpu) 265__cpuinit void numa_add_cpu(int cpu)
268{ 266{
269 /* BP is initialized elsewhere */ 267 set_bit(cpu, &node_to_cpumask[cpu_to_node(cpu)]);
270 if (cpu)
271 set_bit(cpu, &node_to_cpumask[cpu_to_node(cpu)]);
272} 268}
273 269
274unsigned long __init numa_free_all_bootmem(void) 270unsigned long __init numa_free_all_bootmem(void)
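The first numa.c hunk reorders the round-robin seeding: each unmapped CPU is assigned the current node first and the cursor is advanced afterwards, starting from the first online node instead of node 0. A small, self-contained userspace sketch of the same round-robin idea; the node list, CPU count and initial mapping are made-up values, and plain arrays stand in for the kernel's nodemask helpers:

	#include <stdio.h>

	#define NR_CPUS      8
	#define NUMA_NO_NODE (-1)

	int main(void)
	{
		int online_nodes[] = { 0, 2, 3 };          /* hypothetical online node ids */
		int nr_nodes = 3;
		int cpu_to_node[NR_CPUS] = { 0, NUMA_NO_NODE, NUMA_NO_NODE, 2,
					     NUMA_NO_NODE, NUMA_NO_NODE, 3, NUMA_NO_NODE };
		int rr = 0;                                /* index of the first online node */

		for (int i = 0; i < NR_CPUS; i++) {
			if (cpu_to_node[i] != NUMA_NO_NODE)
				continue;                  /* already mapped */
			cpu_to_node[i] = online_nodes[rr]; /* assign first ... */
			rr = (rr + 1) % nr_nodes;          /* ... then advance and wrap */
		}

		for (int i = 0; i < NR_CPUS; i++)
			printf("cpu %d -> node %d\n", i, cpu_to_node[i]);
		return 0;
	}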
diff --git a/arch/xtensa/kernel/pci.c b/arch/xtensa/kernel/pci.c
index 09887c96e9a1..de19501aa809 100644
--- a/arch/xtensa/kernel/pci.c
+++ b/arch/xtensa/kernel/pci.c
@@ -402,8 +402,8 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
402 __pci_mmap_set_flags(dev, vma, mmap_state); 402 __pci_mmap_set_flags(dev, vma, mmap_state);
403 __pci_mmap_set_pgprot(dev, vma, mmap_state, write_combine); 403 __pci_mmap_set_pgprot(dev, vma, mmap_state, write_combine);
404 404
405 ret = io_remap_page_range(vma, vma->vm_start, vma->vm_pgoff<<PAGE_SHIFT, 405 ret = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
406 vma->vm_end - vma->vm_start, vma->vm_page_prot); 406 vma->vm_end - vma->vm_start,vma->vm_page_prot);
407 407
408 return ret; 408 return ret;
409} 409}
diff --git a/arch/xtensa/kernel/platform.c b/arch/xtensa/kernel/platform.c
index cf1362784443..03674daabc66 100644
--- a/arch/xtensa/kernel/platform.c
+++ b/arch/xtensa/kernel/platform.c
@@ -39,7 +39,7 @@ _F(int, pcibios_fixup, (void), { return 0; });
39_F(int, get_rtc_time, (time_t* t), { return 0; }); 39_F(int, get_rtc_time, (time_t* t), { return 0; });
40_F(int, set_rtc_time, (time_t t), { return 0; }); 40_F(int, set_rtc_time, (time_t t), { return 0; });
41 41
42#if CONFIG_XTENSA_CALIBRATE_CCOUNT 42#ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT
43_F(void, calibrate_ccount, (void), 43_F(void, calibrate_ccount, (void),
44{ 44{
45 printk ("ERROR: Cannot calibrate cpu frequency! Assuming 100MHz.\n"); 45 printk ("ERROR: Cannot calibrate cpu frequency! Assuming 100MHz.\n");
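This xtensa hunk (and the similar setup.c, time.c and init.c hunks below) switch from "#if CONFIG_FOO" to "#ifdef CONFIG_FOO". Kernel boolean config symbols are defined to 1 when enabled and left undefined when disabled, so #ifdef is the correct presence test; #if on an undefined symbol only works because the preprocessor silently treats it as 0, and it draws -Wundef warnings. A tiny demonstration you can compile either way; CONFIG_FOO here is just an example symbol:

	#include <stdio.h>

	/* Build with:  cc -Wundef demo.c              (CONFIG_FOO unset)
	 *        or:   cc -Wundef -DCONFIG_FOO=1 demo.c
	 */
	int main(void)
	{
	#ifdef CONFIG_FOO
		puts("CONFIG_FOO is defined");   /* idiomatic presence test */
	#else
		puts("CONFIG_FOO is not defined");
	#endif

	#if CONFIG_FOO				 /* warns with -Wundef when unset */
		puts("#if CONFIG_FOO taken");
	#endif
		return 0;
	}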
diff --git a/arch/xtensa/kernel/process.c b/arch/xtensa/kernel/process.c
index c83bb0d41787..08ef6d82ee51 100644
--- a/arch/xtensa/kernel/process.c
+++ b/arch/xtensa/kernel/process.c
@@ -457,7 +457,7 @@ int
457dump_task_fpu(struct pt_regs *regs, struct task_struct *task, elf_fpregset_t *r) 457dump_task_fpu(struct pt_regs *regs, struct task_struct *task, elf_fpregset_t *r)
458{ 458{
459/* see asm/coprocessor.h for this magic number 16 */ 459/* see asm/coprocessor.h for this magic number 16 */
460#if TOTAL_CPEXTRA_SIZE > 16 460#if XTENSA_CP_EXTRA_SIZE > 16
461 do_save_fpregs (r, regs, task); 461 do_save_fpregs (r, regs, task);
462 462
463 /* For now, bit 16 means some extra state may be present: */ 463 /* For now, bit 16 means some extra state may be present: */
diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c
index 1f5bf5d624e4..513ed8d67766 100644
--- a/arch/xtensa/kernel/setup.c
+++ b/arch/xtensa/kernel/setup.c
@@ -304,7 +304,7 @@ void __init setup_arch(char **cmdline_p)
304# endif 304# endif
305#endif 305#endif
306 306
307#if CONFIG_PCI 307#ifdef CONFIG_PCI
308 platform_pcibios_init(); 308 platform_pcibios_init();
309#endif 309#endif
310} 310}
diff --git a/arch/xtensa/kernel/signal.c b/arch/xtensa/kernel/signal.c
index dc42cede9394..e252b61e45a5 100644
--- a/arch/xtensa/kernel/signal.c
+++ b/arch/xtensa/kernel/signal.c
@@ -182,7 +182,7 @@ restore_cpextra (struct _cpstate *buf)
182 182
183 struct task_struct *tsk = current; 183 struct task_struct *tsk = current;
184 release_all_cp(tsk); 184 release_all_cp(tsk);
185 return __copy_from_user(tsk->thread.cpextra, buf, TOTAL_CPEXTRA_SIZE); 185 return __copy_from_user(tsk->thread.cpextra, buf, XTENSA_CP_EXTRA_SIZE);
186#endif 186#endif
187 return 0; 187 return 0;
188} 188}
diff --git a/arch/xtensa/kernel/time.c b/arch/xtensa/kernel/time.c
index 1ac7d5ce7456..8e423d1335ce 100644
--- a/arch/xtensa/kernel/time.c
+++ b/arch/xtensa/kernel/time.c
@@ -68,7 +68,7 @@ void __init time_init(void)
68 * speed for the CALIBRATE. 68 * speed for the CALIBRATE.
69 */ 69 */
70 70
71#if CONFIG_XTENSA_CALIBRATE_CCOUNT 71#ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT
72 printk("Calibrating CPU frequency "); 72 printk("Calibrating CPU frequency ");
73 platform_calibrate_ccount(); 73 platform_calibrate_ccount();
74 printk("%d.%02d MHz\n", (int)ccount_per_jiffy/(1000000/HZ), 74 printk("%d.%02d MHz\n", (int)ccount_per_jiffy/(1000000/HZ),
diff --git a/arch/xtensa/mm/init.c b/arch/xtensa/mm/init.c
index 56aace84aaeb..5a91d6c9e66d 100644
--- a/arch/xtensa/mm/init.c
+++ b/arch/xtensa/mm/init.c
@@ -239,7 +239,7 @@ void __init mem_init(void)
239 high_memory = (void *) __va(max_mapnr << PAGE_SHIFT); 239 high_memory = (void *) __va(max_mapnr << PAGE_SHIFT);
240 highmemsize = 0; 240 highmemsize = 0;
241 241
242#if CONFIG_HIGHMEM 242#ifdef CONFIG_HIGHMEM
243#error HIGHGMEM not implemented in init.c 243#error HIGHGMEM not implemented in init.c
244#endif 244#endif
245 245
diff --git a/drivers/char/drm/drm_drv.c b/drivers/char/drm/drm_drv.c
index 6ba48f346fcf..041bb47b5c39 100644
--- a/drivers/char/drm/drm_drv.c
+++ b/drivers/char/drm/drm_drv.c
@@ -376,7 +376,7 @@ static int __init drm_core_init(void)
376 goto err_p2; 376 goto err_p2;
377 } 377 }
378 378
379 drm_proc_root = create_proc_entry("dri", S_IFDIR, NULL); 379 drm_proc_root = proc_mkdir("dri", NULL);
380 if (!drm_proc_root) { 380 if (!drm_proc_root) {
381 DRM_ERROR("Cannot create /proc/dri\n"); 381 DRM_ERROR("Cannot create /proc/dri\n");
382 ret = -1; 382 ret = -1;
diff --git a/drivers/char/drm/drm_proc.c b/drivers/char/drm/drm_proc.c
index 32d2bb99462c..977961002488 100644
--- a/drivers/char/drm/drm_proc.c
+++ b/drivers/char/drm/drm_proc.c
@@ -95,7 +95,7 @@ int drm_proc_init(drm_device_t *dev, int minor,
95 char name[64]; 95 char name[64];
96 96
97 sprintf(name, "%d", minor); 97 sprintf(name, "%d", minor);
98 *dev_root = create_proc_entry(name, S_IFDIR, root); 98 *dev_root = proc_mkdir(name, root);
99 if (!*dev_root) { 99 if (!*dev_root) {
100 DRM_ERROR("Cannot create /proc/dri/%s\n", name); 100 DRM_ERROR("Cannot create /proc/dri/%s\n", name);
101 return -1; 101 return -1;
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index de0379b6d502..c055bb630ffc 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -273,7 +273,6 @@ static int hpet_mmap(struct file *file, struct vm_area_struct *vma)
273 273
274 vma->vm_flags |= VM_IO; 274 vma->vm_flags |= VM_IO;
275 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); 275 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
276 addr = __pa(addr);
277 276
278 if (io_remap_pfn_range(vma, vma->vm_start, addr >> PAGE_SHIFT, 277 if (io_remap_pfn_range(vma, vma->vm_start, addr >> PAGE_SHIFT,
279 PAGE_SIZE, vma->vm_page_prot)) { 278 PAGE_SIZE, vma->vm_page_prot)) {
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index 463351d4f942..32fa82c78c73 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -2620,7 +2620,7 @@ void ipmi_smi_msg_received(ipmi_smi_t intf,
2620 spin_lock_irqsave(&(intf->waiting_msgs_lock), flags); 2620 spin_lock_irqsave(&(intf->waiting_msgs_lock), flags);
2621 if (!list_empty(&(intf->waiting_msgs))) { 2621 if (!list_empty(&(intf->waiting_msgs))) {
2622 list_add_tail(&(msg->link), &(intf->waiting_msgs)); 2622 list_add_tail(&(msg->link), &(intf->waiting_msgs));
2623 spin_unlock(&(intf->waiting_msgs_lock)); 2623 spin_unlock_irqrestore(&(intf->waiting_msgs_lock), flags);
2624 goto out_unlock; 2624 goto out_unlock;
2625 } 2625 }
2626 spin_unlock_irqrestore(&(intf->waiting_msgs_lock), flags); 2626 spin_unlock_irqrestore(&(intf->waiting_msgs_lock), flags);
@@ -2629,9 +2629,9 @@ void ipmi_smi_msg_received(ipmi_smi_t intf,
2629 if (rv > 0) { 2629 if (rv > 0) {
2630 /* Could not handle the message now, just add it to a 2630 /* Could not handle the message now, just add it to a
2631 list to handle later. */ 2631 list to handle later. */
2632 spin_lock(&(intf->waiting_msgs_lock)); 2632 spin_lock_irqsave(&(intf->waiting_msgs_lock), flags);
2633 list_add_tail(&(msg->link), &(intf->waiting_msgs)); 2633 list_add_tail(&(msg->link), &(intf->waiting_msgs));
2634 spin_unlock(&(intf->waiting_msgs_lock)); 2634 spin_unlock_irqrestore(&(intf->waiting_msgs_lock), flags);
2635 } else if (rv == 0) { 2635 } else if (rv == 0) {
2636 ipmi_free_smi_msg(msg); 2636 ipmi_free_smi_msg(msg);
2637 } 2637 }
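The ipmi_msghandler hunks make the locking symmetric: a lock taken with spin_lock_irqsave() must be released with spin_unlock_irqrestore() using the same flags, and a list that can also be touched from interrupt context should not be protected with the plain spin_lock()/spin_unlock() pair. A kernel-style sketch of the pattern; the lock, list and function names are hypothetical:

	#include <linux/spinlock.h>
	#include <linux/list.h>

	static DEFINE_SPINLOCK(msg_lock);	/* hypothetical lock */
	static LIST_HEAD(msg_list);		/* hypothetical queue */

	static void queue_msg(struct list_head *entry)
	{
		unsigned long flags;

		spin_lock_irqsave(&msg_lock, flags);	  /* disable local IRQs, save state */
		list_add_tail(entry, &msg_list);
		spin_unlock_irqrestore(&msg_lock, flags); /* restore the saved IRQ state */
	}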
diff --git a/drivers/char/ipmi/ipmi_poweroff.c b/drivers/char/ipmi/ipmi_poweroff.c
index e82a96ba396b..f66947722e12 100644
--- a/drivers/char/ipmi/ipmi_poweroff.c
+++ b/drivers/char/ipmi/ipmi_poweroff.c
@@ -55,7 +55,7 @@ extern void (*pm_power_off)(void);
55static int poweroff_powercycle; 55static int poweroff_powercycle;
56 56
57/* parameter definition to allow user to flag power cycle */ 57/* parameter definition to allow user to flag power cycle */
58module_param(poweroff_powercycle, int, 0); 58module_param(poweroff_powercycle, int, 0644);
59MODULE_PARM_DESC(poweroff_powercycles, " Set to non-zero to enable power cycle instead of power down. Power cycle is contingent on hardware support, otherwise it defaults back to power down."); 59MODULE_PARM_DESC(poweroff_powercycles, " Set to non-zero to enable power cycle instead of power down. Power cycle is contingent on hardware support, otherwise it defaults back to power down.");
60 60
61/* Stuff from the get device id command. */ 61/* Stuff from the get device id command. */
diff --git a/drivers/char/n_r3964.c b/drivers/char/n_r3964.c
index 2291a87e8ada..97d6dc24b800 100644
--- a/drivers/char/n_r3964.c
+++ b/drivers/char/n_r3964.c
@@ -229,8 +229,8 @@ static int __init r3964_init(void)
229 TRACE_L("line discipline %d registered", N_R3964); 229 TRACE_L("line discipline %d registered", N_R3964);
230 TRACE_L("flags=%x num=%x", tty_ldisc_N_R3964.flags, 230 TRACE_L("flags=%x num=%x", tty_ldisc_N_R3964.flags,
231 tty_ldisc_N_R3964.num); 231 tty_ldisc_N_R3964.num);
232 TRACE_L("open=%x", (int)tty_ldisc_N_R3964.open); 232 TRACE_L("open=%p", tty_ldisc_N_R3964.open);
233 TRACE_L("tty_ldisc_N_R3964 = %x", (int)&tty_ldisc_N_R3964); 233 TRACE_L("tty_ldisc_N_R3964 = %p", &tty_ldisc_N_R3964);
234 } 234 }
235 else 235 else
236 { 236 {
@@ -267,8 +267,8 @@ static void add_tx_queue(struct r3964_info *pInfo, struct r3964_block_header *pH
267 267
268 spin_unlock_irqrestore(&pInfo->lock, flags); 268 spin_unlock_irqrestore(&pInfo->lock, flags);
269 269
270 TRACE_Q("add_tx_queue %x, length %d, tx_first = %x", 270 TRACE_Q("add_tx_queue %p, length %d, tx_first = %p",
271 (int)pHeader, pHeader->length, (int)pInfo->tx_first ); 271 pHeader, pHeader->length, pInfo->tx_first );
272} 272}
273 273
274static void remove_from_tx_queue(struct r3964_info *pInfo, int error_code) 274static void remove_from_tx_queue(struct r3964_info *pInfo, int error_code)
@@ -285,10 +285,10 @@ static void remove_from_tx_queue(struct r3964_info *pInfo, int error_code)
285 return; 285 return;
286 286
287#ifdef DEBUG_QUEUE 287#ifdef DEBUG_QUEUE
288 printk("r3964: remove_from_tx_queue: %x, length %d - ", 288 printk("r3964: remove_from_tx_queue: %p, length %u - ",
289 (int)pHeader, (int)pHeader->length ); 289 pHeader, pHeader->length );
290 for(pDump=pHeader;pDump;pDump=pDump->next) 290 for(pDump=pHeader;pDump;pDump=pDump->next)
291 printk("%x ", (int)pDump); 291 printk("%p ", pDump);
292 printk("\n"); 292 printk("\n");
293#endif 293#endif
294 294
@@ -319,10 +319,10 @@ static void remove_from_tx_queue(struct r3964_info *pInfo, int error_code)
319 spin_unlock_irqrestore(&pInfo->lock, flags); 319 spin_unlock_irqrestore(&pInfo->lock, flags);
320 320
321 kfree(pHeader); 321 kfree(pHeader);
322 TRACE_M("remove_from_tx_queue - kfree %x",(int)pHeader); 322 TRACE_M("remove_from_tx_queue - kfree %p",pHeader);
323 323
324 TRACE_Q("remove_from_tx_queue: tx_first = %x, tx_last = %x", 324 TRACE_Q("remove_from_tx_queue: tx_first = %p, tx_last = %p",
325 (int)pInfo->tx_first, (int)pInfo->tx_last ); 325 pInfo->tx_first, pInfo->tx_last );
326} 326}
327 327
328static void add_rx_queue(struct r3964_info *pInfo, struct r3964_block_header *pHeader) 328static void add_rx_queue(struct r3964_info *pInfo, struct r3964_block_header *pHeader)
@@ -346,9 +346,9 @@ static void add_rx_queue(struct r3964_info *pInfo, struct r3964_block_header *pH
346 346
347 spin_unlock_irqrestore(&pInfo->lock, flags); 347 spin_unlock_irqrestore(&pInfo->lock, flags);
348 348
349 TRACE_Q("add_rx_queue: %x, length = %d, rx_first = %x, count = %d", 349 TRACE_Q("add_rx_queue: %p, length = %d, rx_first = %p, count = %d",
350 (int)pHeader, pHeader->length, 350 pHeader, pHeader->length,
351 (int)pInfo->rx_first, pInfo->blocks_in_rx_queue); 351 pInfo->rx_first, pInfo->blocks_in_rx_queue);
352} 352}
353 353
354static void remove_from_rx_queue(struct r3964_info *pInfo, 354static void remove_from_rx_queue(struct r3964_info *pInfo,
@@ -360,10 +360,10 @@ static void remove_from_rx_queue(struct r3964_info *pInfo,
360 if(pHeader==NULL) 360 if(pHeader==NULL)
361 return; 361 return;
362 362
363 TRACE_Q("remove_from_rx_queue: rx_first = %x, rx_last = %x, count = %d", 363 TRACE_Q("remove_from_rx_queue: rx_first = %p, rx_last = %p, count = %d",
364 (int)pInfo->rx_first, (int)pInfo->rx_last, pInfo->blocks_in_rx_queue ); 364 pInfo->rx_first, pInfo->rx_last, pInfo->blocks_in_rx_queue );
365 TRACE_Q("remove_from_rx_queue: %x, length %d", 365 TRACE_Q("remove_from_rx_queue: %p, length %u",
366 (int)pHeader, (int)pHeader->length ); 366 pHeader, pHeader->length );
367 367
368 spin_lock_irqsave(&pInfo->lock, flags); 368 spin_lock_irqsave(&pInfo->lock, flags);
369 369
@@ -401,10 +401,10 @@ static void remove_from_rx_queue(struct r3964_info *pInfo,
401 spin_unlock_irqrestore(&pInfo->lock, flags); 401 spin_unlock_irqrestore(&pInfo->lock, flags);
402 402
403 kfree(pHeader); 403 kfree(pHeader);
404 TRACE_M("remove_from_rx_queue - kfree %x",(int)pHeader); 404 TRACE_M("remove_from_rx_queue - kfree %p",pHeader);
405 405
406 TRACE_Q("remove_from_rx_queue: rx_first = %x, rx_last = %x, count = %d", 406 TRACE_Q("remove_from_rx_queue: rx_first = %p, rx_last = %p, count = %d",
407 (int)pInfo->rx_first, (int)pInfo->rx_last, pInfo->blocks_in_rx_queue ); 407 pInfo->rx_first, pInfo->rx_last, pInfo->blocks_in_rx_queue );
408} 408}
409 409
410static void put_char(struct r3964_info *pInfo, unsigned char ch) 410static void put_char(struct r3964_info *pInfo, unsigned char ch)
@@ -506,8 +506,8 @@ static void transmit_block(struct r3964_info *pInfo)
506 if(tty->driver->write_room) 506 if(tty->driver->write_room)
507 room=tty->driver->write_room(tty); 507 room=tty->driver->write_room(tty);
508 508
509 TRACE_PS("transmit_block %x, room %d, length %d", 509 TRACE_PS("transmit_block %p, room %d, length %d",
510 (int)pBlock, room, pBlock->length); 510 pBlock, room, pBlock->length);
511 511
512 while(pInfo->tx_position < pBlock->length) 512 while(pInfo->tx_position < pBlock->length)
513 { 513 {
@@ -588,7 +588,7 @@ static void on_receive_block(struct r3964_info *pInfo)
588 588
589 /* prepare struct r3964_block_header: */ 589 /* prepare struct r3964_block_header: */
590 pBlock = kmalloc(length+sizeof(struct r3964_block_header), GFP_KERNEL); 590 pBlock = kmalloc(length+sizeof(struct r3964_block_header), GFP_KERNEL);
591 TRACE_M("on_receive_block - kmalloc %x",(int)pBlock); 591 TRACE_M("on_receive_block - kmalloc %p",pBlock);
592 592
593 if(pBlock==NULL) 593 if(pBlock==NULL)
594 return; 594 return;
@@ -868,11 +868,11 @@ static int enable_signals(struct r3964_info *pInfo, pid_t pid, int arg)
868 if(pMsg) 868 if(pMsg)
869 { 869 {
870 kfree(pMsg); 870 kfree(pMsg);
871 TRACE_M("enable_signals - msg kfree %x",(int)pMsg); 871 TRACE_M("enable_signals - msg kfree %p",pMsg);
872 } 872 }
873 } 873 }
874 kfree(pClient); 874 kfree(pClient);
875 TRACE_M("enable_signals - kfree %x",(int)pClient); 875 TRACE_M("enable_signals - kfree %p",pClient);
876 return 0; 876 return 0;
877 } 877 }
878 } 878 }
@@ -890,7 +890,7 @@ static int enable_signals(struct r3964_info *pInfo, pid_t pid, int arg)
890 { 890 {
891 /* add client to client list */ 891 /* add client to client list */
892 pClient=kmalloc(sizeof(struct r3964_client_info), GFP_KERNEL); 892 pClient=kmalloc(sizeof(struct r3964_client_info), GFP_KERNEL);
893 TRACE_M("enable_signals - kmalloc %x",(int)pClient); 893 TRACE_M("enable_signals - kmalloc %p",pClient);
894 if(pClient==NULL) 894 if(pClient==NULL)
895 return -ENOMEM; 895 return -ENOMEM;
896 896
@@ -954,7 +954,7 @@ static void add_msg(struct r3964_client_info *pClient, int msg_id, int arg,
954queue_the_message: 954queue_the_message:
955 955
956 pMsg = kmalloc(sizeof(struct r3964_message), GFP_KERNEL); 956 pMsg = kmalloc(sizeof(struct r3964_message), GFP_KERNEL);
957 TRACE_M("add_msg - kmalloc %x",(int)pMsg); 957 TRACE_M("add_msg - kmalloc %p",pMsg);
958 if(pMsg==NULL) { 958 if(pMsg==NULL) {
959 return; 959 return;
960 } 960 }
@@ -1067,11 +1067,11 @@ static int r3964_open(struct tty_struct *tty)
1067 struct r3964_info *pInfo; 1067 struct r3964_info *pInfo;
1068 1068
1069 TRACE_L("open"); 1069 TRACE_L("open");
1070 TRACE_L("tty=%x, PID=%d, disc_data=%x", 1070 TRACE_L("tty=%p, PID=%d, disc_data=%p",
1071 (int)tty, current->pid, (int)tty->disc_data); 1071 tty, current->pid, tty->disc_data);
1072 1072
1073 pInfo=kmalloc(sizeof(struct r3964_info), GFP_KERNEL); 1073 pInfo=kmalloc(sizeof(struct r3964_info), GFP_KERNEL);
1074 TRACE_M("r3964_open - info kmalloc %x",(int)pInfo); 1074 TRACE_M("r3964_open - info kmalloc %p",pInfo);
1075 1075
1076 if(!pInfo) 1076 if(!pInfo)
1077 { 1077 {
@@ -1080,26 +1080,26 @@ static int r3964_open(struct tty_struct *tty)
1080 } 1080 }
1081 1081
1082 pInfo->rx_buf = kmalloc(RX_BUF_SIZE, GFP_KERNEL); 1082 pInfo->rx_buf = kmalloc(RX_BUF_SIZE, GFP_KERNEL);
1083 TRACE_M("r3964_open - rx_buf kmalloc %x",(int)pInfo->rx_buf); 1083 TRACE_M("r3964_open - rx_buf kmalloc %p",pInfo->rx_buf);
1084 1084
1085 if(!pInfo->rx_buf) 1085 if(!pInfo->rx_buf)
1086 { 1086 {
1087 printk(KERN_ERR "r3964: failed to alloc receive buffer\n"); 1087 printk(KERN_ERR "r3964: failed to alloc receive buffer\n");
1088 kfree(pInfo); 1088 kfree(pInfo);
1089 TRACE_M("r3964_open - info kfree %x",(int)pInfo); 1089 TRACE_M("r3964_open - info kfree %p",pInfo);
1090 return -ENOMEM; 1090 return -ENOMEM;
1091 } 1091 }
1092 1092
1093 pInfo->tx_buf = kmalloc(TX_BUF_SIZE, GFP_KERNEL); 1093 pInfo->tx_buf = kmalloc(TX_BUF_SIZE, GFP_KERNEL);
1094 TRACE_M("r3964_open - tx_buf kmalloc %x",(int)pInfo->tx_buf); 1094 TRACE_M("r3964_open - tx_buf kmalloc %p",pInfo->tx_buf);
1095 1095
1096 if(!pInfo->tx_buf) 1096 if(!pInfo->tx_buf)
1097 { 1097 {
1098 printk(KERN_ERR "r3964: failed to alloc transmit buffer\n"); 1098 printk(KERN_ERR "r3964: failed to alloc transmit buffer\n");
1099 kfree(pInfo->rx_buf); 1099 kfree(pInfo->rx_buf);
1100 TRACE_M("r3964_open - rx_buf kfree %x",(int)pInfo->rx_buf); 1100 TRACE_M("r3964_open - rx_buf kfree %p",pInfo->rx_buf);
1101 kfree(pInfo); 1101 kfree(pInfo);
1102 TRACE_M("r3964_open - info kfree %x",(int)pInfo); 1102 TRACE_M("r3964_open - info kfree %p",pInfo);
1103 return -ENOMEM; 1103 return -ENOMEM;
1104 } 1104 }
1105 1105
@@ -1154,11 +1154,11 @@ static void r3964_close(struct tty_struct *tty)
1154 if(pMsg) 1154 if(pMsg)
1155 { 1155 {
1156 kfree(pMsg); 1156 kfree(pMsg);
1157 TRACE_M("r3964_close - msg kfree %x",(int)pMsg); 1157 TRACE_M("r3964_close - msg kfree %p",pMsg);
1158 } 1158 }
1159 } 1159 }
1160 kfree(pClient); 1160 kfree(pClient);
1161 TRACE_M("r3964_close - client kfree %x",(int)pClient); 1161 TRACE_M("r3964_close - client kfree %p",pClient);
1162 pClient=pNext; 1162 pClient=pNext;
1163 } 1163 }
1164 /* Remove jobs from tx_queue: */ 1164 /* Remove jobs from tx_queue: */
@@ -1177,11 +1177,11 @@ static void r3964_close(struct tty_struct *tty)
1177 /* Free buffers: */ 1177 /* Free buffers: */
1178 wake_up_interruptible(&pInfo->read_wait); 1178 wake_up_interruptible(&pInfo->read_wait);
1179 kfree(pInfo->rx_buf); 1179 kfree(pInfo->rx_buf);
1180 TRACE_M("r3964_close - rx_buf kfree %x",(int)pInfo->rx_buf); 1180 TRACE_M("r3964_close - rx_buf kfree %p",pInfo->rx_buf);
1181 kfree(pInfo->tx_buf); 1181 kfree(pInfo->tx_buf);
1182 TRACE_M("r3964_close - tx_buf kfree %x",(int)pInfo->tx_buf); 1182 TRACE_M("r3964_close - tx_buf kfree %p",pInfo->tx_buf);
1183 kfree(pInfo); 1183 kfree(pInfo);
1184 TRACE_M("r3964_close - info kfree %x",(int)pInfo); 1184 TRACE_M("r3964_close - info kfree %p",pInfo);
1185} 1185}
1186 1186
1187static ssize_t r3964_read(struct tty_struct *tty, struct file *file, 1187static ssize_t r3964_read(struct tty_struct *tty, struct file *file,
@@ -1234,7 +1234,7 @@ repeat:
1234 count = sizeof(struct r3964_client_message); 1234 count = sizeof(struct r3964_client_message);
1235 1235
1236 kfree(pMsg); 1236 kfree(pMsg);
1237 TRACE_M("r3964_read - msg kfree %x",(int)pMsg); 1237 TRACE_M("r3964_read - msg kfree %p",pMsg);
1238 1238
1239 if (copy_to_user(buf,&theMsg, count)) 1239 if (copy_to_user(buf,&theMsg, count))
1240 return -EFAULT; 1240 return -EFAULT;
@@ -1279,7 +1279,7 @@ static ssize_t r3964_write(struct tty_struct * tty, struct file * file,
1279 * Allocate a buffer for the data and copy it from the buffer with header prepended 1279 * Allocate a buffer for the data and copy it from the buffer with header prepended
1280 */ 1280 */
1281 new_data = kmalloc (count+sizeof(struct r3964_block_header), GFP_KERNEL); 1281 new_data = kmalloc (count+sizeof(struct r3964_block_header), GFP_KERNEL);
1282 TRACE_M("r3964_write - kmalloc %x",(int)new_data); 1282 TRACE_M("r3964_write - kmalloc %p",new_data);
1283 if (new_data == NULL) { 1283 if (new_data == NULL) {
1284 if (pInfo->flags & R3964_DEBUG) 1284 if (pInfo->flags & R3964_DEBUG)
1285 { 1285 {
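All of the n_r3964 hunks replace "(int)pointer printed with %x" by a plain %p: casting a pointer to int truncates it on 64-bit targets and provokes compiler warnings, while %p prints the full pointer value portably. A tiny userspace illustration of the difference:

	#include <stdio.h>

	int main(void)
	{
		int value = 42;
		int *ptr = &value;

		/* printf("%x", (int)ptr);  -- truncates the pointer on LP64 and warns */
		printf("ptr = %p\n", (void *)ptr);   /* portable: prints the full address */
		return 0;
	}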
diff --git a/drivers/char/watchdog/mv64x60_wdt.c b/drivers/char/watchdog/mv64x60_wdt.c
index 1436aea3b28f..6d3ff0836c44 100644
--- a/drivers/char/watchdog/mv64x60_wdt.c
+++ b/drivers/char/watchdog/mv64x60_wdt.c
@@ -87,6 +87,8 @@ static int mv64x60_wdt_open(struct inode *inode, struct file *file)
87 mv64x60_wdt_service(); 87 mv64x60_wdt_service();
88 mv64x60_wdt_handler_enable(); 88 mv64x60_wdt_handler_enable();
89 89
90 nonseekable_open(inode, file);
91
90 return 0; 92 return 0;
91} 93}
92 94
@@ -103,12 +105,9 @@ static int mv64x60_wdt_release(struct inode *inode, struct file *file)
103 return 0; 105 return 0;
104} 106}
105 107
106static ssize_t mv64x60_wdt_write(struct file *file, const char *data, 108static ssize_t mv64x60_wdt_write(struct file *file, const char __user *data,
107 size_t len, loff_t * ppos) 109 size_t len, loff_t * ppos)
108{ 110{
109 if (*ppos != file->f_pos)
110 return -ESPIPE;
111
112 if (len) 111 if (len)
113 mv64x60_wdt_service(); 112 mv64x60_wdt_service();
114 113
@@ -119,6 +118,7 @@ static int mv64x60_wdt_ioctl(struct inode *inode, struct file *file,
119 unsigned int cmd, unsigned long arg) 118 unsigned int cmd, unsigned long arg)
120{ 119{
121 int timeout; 120 int timeout;
121 void __user *argp = (void __user *)arg;
122 static struct watchdog_info info = { 122 static struct watchdog_info info = {
123 .options = WDIOF_KEEPALIVEPING, 123 .options = WDIOF_KEEPALIVEPING,
124 .firmware_version = 0, 124 .firmware_version = 0,
@@ -127,13 +127,13 @@ static int mv64x60_wdt_ioctl(struct inode *inode, struct file *file,
127 127
128 switch (cmd) { 128 switch (cmd) {
129 case WDIOC_GETSUPPORT: 129 case WDIOC_GETSUPPORT:
130 if (copy_to_user((void *)arg, &info, sizeof(info))) 130 if (copy_to_user(argp, &info, sizeof(info)))
131 return -EFAULT; 131 return -EFAULT;
132 break; 132 break;
133 133
134 case WDIOC_GETSTATUS: 134 case WDIOC_GETSTATUS:
135 case WDIOC_GETBOOTSTATUS: 135 case WDIOC_GETBOOTSTATUS:
136 if (put_user(wdt_status, (int *)arg)) 136 if (put_user(wdt_status, (int __user *)argp))
137 return -EFAULT; 137 return -EFAULT;
138 wdt_status &= ~WDIOF_KEEPALIVEPING; 138 wdt_status &= ~WDIOF_KEEPALIVEPING;
139 break; 139 break;
@@ -154,7 +154,7 @@ static int mv64x60_wdt_ioctl(struct inode *inode, struct file *file,
154 154
155 case WDIOC_GETTIMEOUT: 155 case WDIOC_GETTIMEOUT:
156 timeout = mv64x60_wdt_timeout * HZ; 156 timeout = mv64x60_wdt_timeout * HZ;
157 if (put_user(timeout, (int *)arg)) 157 if (put_user(timeout, (int __user *)argp))
158 return -EFAULT; 158 return -EFAULT;
159 break; 159 break;
160 160
diff --git a/drivers/connector/cn_queue.c b/drivers/connector/cn_queue.c
index 966632182e2d..9f2f00d82917 100644
--- a/drivers/connector/cn_queue.c
+++ b/drivers/connector/cn_queue.c
@@ -31,16 +31,19 @@
31#include <linux/connector.h> 31#include <linux/connector.h>
32#include <linux/delay.h> 32#include <linux/delay.h>
33 33
34static void cn_queue_wrapper(void *data) 34void cn_queue_wrapper(void *data)
35{ 35{
36 struct cn_callback_entry *cbq = data; 36 struct cn_callback_data *d = data;
37 37
38 cbq->cb->callback(cbq->cb->priv); 38 d->callback(d->callback_priv);
39 cbq->destruct_data(cbq->ddata); 39
40 cbq->ddata = NULL; 40 d->destruct_data(d->ddata);
41 d->ddata = NULL;
42
43 kfree(d->free);
41} 44}
42 45
43static struct cn_callback_entry *cn_queue_alloc_callback_entry(struct cn_callback *cb) 46static struct cn_callback_entry *cn_queue_alloc_callback_entry(char *name, struct cb_id *id, void (*callback)(void *))
44{ 47{
45 struct cn_callback_entry *cbq; 48 struct cn_callback_entry *cbq;
46 49
@@ -50,8 +53,11 @@ static struct cn_callback_entry *cn_queue_alloc_callback_entry(struct cn_callbac
50 return NULL; 53 return NULL;
51 } 54 }
52 55
53 cbq->cb = cb; 56 snprintf(cbq->id.name, sizeof(cbq->id.name), "%s", name);
54 INIT_WORK(&cbq->work, &cn_queue_wrapper, cbq); 57 memcpy(&cbq->id.id, id, sizeof(struct cb_id));
58 cbq->data.callback = callback;
59
60 INIT_WORK(&cbq->work, &cn_queue_wrapper, &cbq->data);
55 return cbq; 61 return cbq;
56} 62}
57 63
@@ -68,12 +74,12 @@ int cn_cb_equal(struct cb_id *i1, struct cb_id *i2)
68 return ((i1->idx == i2->idx) && (i1->val == i2->val)); 74 return ((i1->idx == i2->idx) && (i1->val == i2->val));
69} 75}
70 76
71int cn_queue_add_callback(struct cn_queue_dev *dev, struct cn_callback *cb) 77int cn_queue_add_callback(struct cn_queue_dev *dev, char *name, struct cb_id *id, void (*callback)(void *))
72{ 78{
73 struct cn_callback_entry *cbq, *__cbq; 79 struct cn_callback_entry *cbq, *__cbq;
74 int found = 0; 80 int found = 0;
75 81
76 cbq = cn_queue_alloc_callback_entry(cb); 82 cbq = cn_queue_alloc_callback_entry(name, id, callback);
77 if (!cbq) 83 if (!cbq)
78 return -ENOMEM; 84 return -ENOMEM;
79 85
@@ -82,7 +88,7 @@ int cn_queue_add_callback(struct cn_queue_dev *dev, struct cn_callback *cb)
82 88
83 spin_lock_bh(&dev->queue_lock); 89 spin_lock_bh(&dev->queue_lock);
84 list_for_each_entry(__cbq, &dev->queue_list, callback_entry) { 90 list_for_each_entry(__cbq, &dev->queue_list, callback_entry) {
85 if (cn_cb_equal(&__cbq->cb->id, &cb->id)) { 91 if (cn_cb_equal(&__cbq->id.id, id)) {
86 found = 1; 92 found = 1;
87 break; 93 break;
88 } 94 }
@@ -99,7 +105,7 @@ int cn_queue_add_callback(struct cn_queue_dev *dev, struct cn_callback *cb)
99 105
100 cbq->nls = dev->nls; 106 cbq->nls = dev->nls;
101 cbq->seq = 0; 107 cbq->seq = 0;
102 cbq->group = cbq->cb->id.idx; 108 cbq->group = cbq->id.id.idx;
103 109
104 return 0; 110 return 0;
105} 111}
@@ -111,7 +117,7 @@ void cn_queue_del_callback(struct cn_queue_dev *dev, struct cb_id *id)
111 117
112 spin_lock_bh(&dev->queue_lock); 118 spin_lock_bh(&dev->queue_lock);
113 list_for_each_entry_safe(cbq, n, &dev->queue_list, callback_entry) { 119 list_for_each_entry_safe(cbq, n, &dev->queue_list, callback_entry) {
114 if (cn_cb_equal(&cbq->cb->id, id)) { 120 if (cn_cb_equal(&cbq->id.id, id)) {
115 list_del(&cbq->callback_entry); 121 list_del(&cbq->callback_entry);
116 found = 1; 122 found = 1;
117 break; 123 break;
diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c
index aaf6d468a8b9..bb0b3a8de14b 100644
--- a/drivers/connector/connector.c
+++ b/drivers/connector/connector.c
@@ -84,7 +84,7 @@ int cn_netlink_send(struct cn_msg *msg, u32 __group, int gfp_mask)
84 spin_lock_bh(&dev->cbdev->queue_lock); 84 spin_lock_bh(&dev->cbdev->queue_lock);
85 list_for_each_entry(__cbq, &dev->cbdev->queue_list, 85 list_for_each_entry(__cbq, &dev->cbdev->queue_list,
86 callback_entry) { 86 callback_entry) {
87 if (cn_cb_equal(&__cbq->cb->id, &msg->id)) { 87 if (cn_cb_equal(&__cbq->id.id, &msg->id)) {
88 found = 1; 88 found = 1;
89 group = __cbq->group; 89 group = __cbq->group;
90 } 90 }
@@ -127,42 +127,56 @@ static int cn_call_callback(struct cn_msg *msg, void (*destruct_data)(void *), v
127{ 127{
128 struct cn_callback_entry *__cbq; 128 struct cn_callback_entry *__cbq;
129 struct cn_dev *dev = &cdev; 129 struct cn_dev *dev = &cdev;
130 int found = 0; 130 int err = -ENODEV;
131 131
132 spin_lock_bh(&dev->cbdev->queue_lock); 132 spin_lock_bh(&dev->cbdev->queue_lock);
133 list_for_each_entry(__cbq, &dev->cbdev->queue_list, callback_entry) { 133 list_for_each_entry(__cbq, &dev->cbdev->queue_list, callback_entry) {
134 if (cn_cb_equal(&__cbq->cb->id, &msg->id)) { 134 if (cn_cb_equal(&__cbq->id.id, &msg->id)) {
135 /*
136 * Let's scream if there is some magic and the
137 * data will arrive asynchronously here.
138 * [i.e. netlink messages will be queued].
139 * After the first warning I will fix it
140 * quickly, but now I think it is
141 * impossible. --zbr (2004_04_27).
142 */
143 if (likely(!test_bit(0, &__cbq->work.pending) && 135 if (likely(!test_bit(0, &__cbq->work.pending) &&
144 __cbq->ddata == NULL)) { 136 __cbq->data.ddata == NULL)) {
145 __cbq->cb->priv = msg; 137 __cbq->data.callback_priv = msg;
146 138
147 __cbq->ddata = data; 139 __cbq->data.ddata = data;
148 __cbq->destruct_data = destruct_data; 140 __cbq->data.destruct_data = destruct_data;
149 141
150 if (queue_work(dev->cbdev->cn_queue, 142 if (queue_work(dev->cbdev->cn_queue,
151 &__cbq->work)) 143 &__cbq->work))
152 found = 1; 144 err = 0;
153 } else { 145 } else {
154 printk("%s: cbq->data=%p, " 146 struct work_struct *w;
155 "work->pending=%08lx.\n", 147 struct cn_callback_data *d;
156 __func__, __cbq->ddata, 148
157 __cbq->work.pending); 149 w = kzalloc(sizeof(*w) + sizeof(*d), GFP_ATOMIC);
158 WARN_ON(1); 150 if (w) {
151 d = (struct cn_callback_data *)(w+1);
152
153 d->callback_priv = msg;
154 d->callback = __cbq->data.callback;
155 d->ddata = data;
156 d->destruct_data = destruct_data;
157 d->free = w;
158
159 INIT_LIST_HEAD(&w->entry);
160 w->pending = 0;
161 w->func = &cn_queue_wrapper;
162 w->data = d;
163 init_timer(&w->timer);
164
165 if (queue_work(dev->cbdev->cn_queue, w))
166 err = 0;
167 else {
168 kfree(w);
169 err = -EINVAL;
170 }
171 } else
172 err = -ENOMEM;
159 } 173 }
160 break; 174 break;
161 } 175 }
162 } 176 }
163 spin_unlock_bh(&dev->cbdev->queue_lock); 177 spin_unlock_bh(&dev->cbdev->queue_lock);
164 178
165 return found ? 0 : -ENODEV; 179 return err;
166} 180}
167 181
168/* 182/*
@@ -291,22 +305,10 @@ int cn_add_callback(struct cb_id *id, char *name, void (*callback)(void *))
291{ 305{
292 int err; 306 int err;
293 struct cn_dev *dev = &cdev; 307 struct cn_dev *dev = &cdev;
294 struct cn_callback *cb;
295
296 cb = kzalloc(sizeof(*cb), GFP_KERNEL);
297 if (!cb)
298 return -ENOMEM;
299
300 scnprintf(cb->name, sizeof(cb->name), "%s", name);
301 308
302 memcpy(&cb->id, id, sizeof(cb->id)); 309 err = cn_queue_add_callback(dev->cbdev, name, id, callback);
303 cb->callback = callback; 310 if (err)
304
305 err = cn_queue_add_callback(dev->cbdev, cb);
306 if (err) {
307 kfree(cb);
308 return err; 311 return err;
309 }
310 312
311 cn_notify(id, 0); 313 cn_notify(id, 0);
312 314
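The connector rework runs every callback through a workqueue; when the per-callback entry is already pending, it allocates a one-shot work_struct plus payload and frees both in the wrapper once the callback has run. A kernel-style sketch of that one-shot pattern against the 2.6.13 three-argument INIT_WORK() API; the struct and function names are hypothetical:

	#include <linux/workqueue.h>
	#include <linux/slab.h>

	struct oneshot {
		struct work_struct work;
		void (*callback)(void *);
		void *priv;
	};

	static void oneshot_wrapper(void *data)
	{
		struct oneshot *o = data;

		o->callback(o->priv);		/* run the user callback */
		kfree(o);			/* one-shot: free work + payload together */
	}

	static int queue_oneshot(struct workqueue_struct *wq,
				 void (*cb)(void *), void *priv)
	{
		struct oneshot *o = kmalloc(sizeof(*o), GFP_ATOMIC);

		if (!o)
			return -ENOMEM;
		o->callback = cb;
		o->priv = priv;
		INIT_WORK(&o->work, oneshot_wrapper, o);	/* old 3-arg form */
		if (!queue_work(wq, &o->work)) {
			kfree(o);
			return -EINVAL;
		}
		return 0;
	}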
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 7e72e922b41c..db358cfa7cbf 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -418,12 +418,11 @@ config SENSORS_HDAPS
418 help 418 help
419 This driver provides support for the IBM Hard Drive Active Protection 419 This driver provides support for the IBM Hard Drive Active Protection
420 System (hdaps), which provides an accelerometer and other misc. data. 420 System (hdaps), which provides an accelerometer and other misc. data.
421 Supported laptops include the IBM ThinkPad T41, T42, T43, and R51. 421 ThinkPads starting with the R50, T41, and X40 are supported. The
422 The accelerometer data is readable via sysfs. 422 accelerometer data is readable via sysfs.
423 423
424 This driver also provides an input class device, allowing the 424 This driver also provides an absolute input class device, allowing
425 laptop to act as a pinball machine-esque mouse. This is off by 425 the laptop to act as a pinball machine-esque joystick.
426 default but enabled via sysfs or the module parameter "mousedev".
427 426
428 Say Y here if you have an applicable laptop and want to experience 427 Say Y here if you have an applicable laptop and want to experience
429 the awesome power of hdaps. 428 the awesome power of hdaps.
diff --git a/drivers/hwmon/hdaps.c b/drivers/hwmon/hdaps.c
index 4c56411f3993..7f0107613827 100644
--- a/drivers/hwmon/hdaps.c
+++ b/drivers/hwmon/hdaps.c
@@ -4,9 +4,9 @@
4 * Copyright (C) 2005 Robert Love <rml@novell.com> 4 * Copyright (C) 2005 Robert Love <rml@novell.com>
5 * Copyright (C) 2005 Jesper Juhl <jesper.juhl@gmail.com> 5 * Copyright (C) 2005 Jesper Juhl <jesper.juhl@gmail.com>
6 * 6 *
7 * The HardDisk Active Protection System (hdaps) is present in the IBM ThinkPad 7 * The HardDisk Active Protection System (hdaps) is present in IBM ThinkPads
8 * T41, T42, T43, R50, R50p, R51, and X40, at least. It provides a basic 8 * starting with the R40, T41, and X40. It provides a basic two-axis
9 * two-axis accelerometer and other data, such as the device's temperature. 9 * accelerometer and other data, such as the device's temperature.
10 * 10 *
11 * This driver is based on the document by Mark A. Smith available at 11 * This driver is based on the document by Mark A. Smith available at
12 * http://www.almaden.ibm.com/cs/people/marksmith/tpaps.html and a lot of trial 12 * http://www.almaden.ibm.com/cs/people/marksmith/tpaps.html and a lot of trial
@@ -487,24 +487,19 @@ static struct attribute_group hdaps_attribute_group = {
487 487
488/* Module stuff */ 488/* Module stuff */
489 489
490/* 490/* hdaps_dmi_match - found a match. return one, short-circuiting the hunt. */
491 * XXX: We should be able to return nonzero and halt the detection process.
492 * But there is a bug in dmi_check_system() where a nonzero return from the
493 * first match will result in a return of failure from dmi_check_system().
494 * I fixed this; the patch is 2.6-git. Once in a released tree, we can make
495 * hdaps_dmi_match_invert() return hdaps_dmi_match(), which in turn returns 1.
496 */
497static int hdaps_dmi_match(struct dmi_system_id *id) 491static int hdaps_dmi_match(struct dmi_system_id *id)
498{ 492{
499 printk(KERN_INFO "hdaps: %s detected.\n", id->ident); 493 printk(KERN_INFO "hdaps: %s detected.\n", id->ident);
500 return 0; 494 return 1;
501} 495}
502 496
497/* hdaps_dmi_match_invert - found an inverted match. */
503static int hdaps_dmi_match_invert(struct dmi_system_id *id) 498static int hdaps_dmi_match_invert(struct dmi_system_id *id)
504{ 499{
505 hdaps_invert = 1; 500 hdaps_invert = 1;
506 printk(KERN_INFO "hdaps: inverting axis readings.\n"); 501 printk(KERN_INFO "hdaps: inverting axis readings.\n");
507 return 0; 502 return hdaps_dmi_match(id);
508} 503}
509 504
510#define HDAPS_DMI_MATCH_NORMAL(model) { \ 505#define HDAPS_DMI_MATCH_NORMAL(model) { \
@@ -534,6 +529,7 @@ static int __init hdaps_init(void)
534 HDAPS_DMI_MATCH_INVERT("ThinkPad R50p"), 529 HDAPS_DMI_MATCH_INVERT("ThinkPad R50p"),
535 HDAPS_DMI_MATCH_NORMAL("ThinkPad R50"), 530 HDAPS_DMI_MATCH_NORMAL("ThinkPad R50"),
536 HDAPS_DMI_MATCH_NORMAL("ThinkPad R51"), 531 HDAPS_DMI_MATCH_NORMAL("ThinkPad R51"),
532 HDAPS_DMI_MATCH_NORMAL("ThinkPad R52"),
537 HDAPS_DMI_MATCH_INVERT("ThinkPad T41p"), 533 HDAPS_DMI_MATCH_INVERT("ThinkPad T41p"),
538 HDAPS_DMI_MATCH_NORMAL("ThinkPad T41"), 534 HDAPS_DMI_MATCH_NORMAL("ThinkPad T41"),
539 HDAPS_DMI_MATCH_INVERT("ThinkPad T42p"), 535 HDAPS_DMI_MATCH_INVERT("ThinkPad T42p"),
@@ -541,6 +537,7 @@ static int __init hdaps_init(void)
541 HDAPS_DMI_MATCH_NORMAL("ThinkPad T43"), 537 HDAPS_DMI_MATCH_NORMAL("ThinkPad T43"),
542 HDAPS_DMI_MATCH_NORMAL("ThinkPad X40"), 538 HDAPS_DMI_MATCH_NORMAL("ThinkPad X40"),
543 HDAPS_DMI_MATCH_NORMAL("ThinkPad X41 Tablet"), 539 HDAPS_DMI_MATCH_NORMAL("ThinkPad X41 Tablet"),
540 HDAPS_DMI_MATCH_NORMAL("ThinkPad X41"),
544 { .ident = NULL } 541 { .ident = NULL }
545 }; 542 };
546 543
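In the hdaps hunks the DMI callbacks now return 1, which tells dmi_check_system() to stop scanning after the first matching model (the earlier workaround of returning 0 is dropped), and the inverted-axis callback chains into the normal one. A minimal kernel-style sketch of that contract, assuming the 2.6-era dmi API; the table contents and names are hypothetical:

	#include <linux/dmi.h>
	#include <linux/kernel.h>

	static int example_dmi_match(struct dmi_system_id *id)
	{
		printk(KERN_INFO "example: %s detected\n", id->ident);
		return 1;			/* non-zero: stop after the first match */
	}

	static struct dmi_system_id example_whitelist[] = {
		{
			.callback = example_dmi_match,
			.ident    = "ThinkPad T43",	/* hypothetical entry */
			.matches  = { DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T43") },
		},
		{ .ident = NULL }
	};

	/* dmi_check_system(example_whitelist) returns the number of entries matched. */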
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 8334496a7e0a..3badfec75b1c 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -245,6 +245,18 @@ config I2C_KEYWEST
245 This support is also available as a module. If so, the module 245 This support is also available as a module. If so, the module
246 will be called i2c-keywest. 246 will be called i2c-keywest.
247 247
248config I2C_PMAC_SMU
249 tristate "Powermac SMU I2C interface"
250 depends on I2C && PMAC_SMU
251 help
252 This supports the use of the I2C interface in the SMU
253	  chip on recent Apple machines like the iMac G5. It is used,
254	  among others, by the thermal control driver for those machines.
255 Say Y if you have such a machine.
256
257 This support is also available as a module. If so, the module
258 will be called i2c-pmac-smu.
259
248config I2C_MPC 260config I2C_MPC
249 tristate "MPC107/824x/85xx/52xx" 261 tristate "MPC107/824x/85xx/52xx"
250 depends on I2C && PPC32 262 depends on I2C && PPC32
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
index 980b3e983670..f1df00f66c6c 100644
--- a/drivers/i2c/busses/Makefile
+++ b/drivers/i2c/busses/Makefile
@@ -20,6 +20,7 @@ obj-$(CONFIG_I2C_ITE) += i2c-ite.o
20obj-$(CONFIG_I2C_IXP2000) += i2c-ixp2000.o 20obj-$(CONFIG_I2C_IXP2000) += i2c-ixp2000.o
21obj-$(CONFIG_I2C_IXP4XX) += i2c-ixp4xx.o 21obj-$(CONFIG_I2C_IXP4XX) += i2c-ixp4xx.o
22obj-$(CONFIG_I2C_KEYWEST) += i2c-keywest.o 22obj-$(CONFIG_I2C_KEYWEST) += i2c-keywest.o
23obj-$(CONFIG_I2C_PMAC_SMU) += i2c-pmac-smu.o
23obj-$(CONFIG_I2C_MPC) += i2c-mpc.o 24obj-$(CONFIG_I2C_MPC) += i2c-mpc.o
24obj-$(CONFIG_I2C_MV64XXX) += i2c-mv64xxx.o 25obj-$(CONFIG_I2C_MV64XXX) += i2c-mv64xxx.o
25obj-$(CONFIG_I2C_NFORCE2) += i2c-nforce2.o 26obj-$(CONFIG_I2C_NFORCE2) += i2c-nforce2.o
diff --git a/drivers/i2c/busses/i2c-pmac-smu.c b/drivers/i2c/busses/i2c-pmac-smu.c
new file mode 100644
index 000000000000..8a9f5648a23d
--- /dev/null
+++ b/drivers/i2c/busses/i2c-pmac-smu.c
@@ -0,0 +1,316 @@
1/*
2 i2c Support for Apple SMU Controller
3
4 Copyright (c) 2005 Benjamin Herrenschmidt, IBM Corp.
5 <benh@kernel.crashing.org>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 2 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20
21*/
22
23#include <linux/config.h>
24#include <linux/module.h>
25#include <linux/kernel.h>
26#include <linux/types.h>
27#include <linux/i2c.h>
28#include <linux/init.h>
29#include <linux/completion.h>
30#include <linux/device.h>
31#include <asm/prom.h>
32#include <asm/of_device.h>
33#include <asm/smu.h>
34
35static int probe;
36
37MODULE_AUTHOR("Benjamin Herrenschmidt <benh@kernel.crashing.org>");
38MODULE_DESCRIPTION("I2C driver for Apple's SMU");
39MODULE_LICENSE("GPL");
40module_param(probe, bool, 0);
41
42
43/* Physical interface */
44struct smu_iface
45{
46 struct i2c_adapter adapter;
47 struct completion complete;
48 u32 busid;
49};
50
51static void smu_i2c_done(struct smu_i2c_cmd *cmd, void *misc)
52{
53 struct smu_iface *iface = misc;
54 complete(&iface->complete);
55}
56
57/*
58 * SMBUS-type transfer entrypoint
59 */
60static s32 smu_smbus_xfer( struct i2c_adapter* adap,
61 u16 addr,
62 unsigned short flags,
63 char read_write,
64 u8 command,
65 int size,
66 union i2c_smbus_data* data)
67{
68 struct smu_iface *iface = i2c_get_adapdata(adap);
69 struct smu_i2c_cmd cmd;
70 int rc = 0;
71 int read = (read_write == I2C_SMBUS_READ);
72
73 cmd.info.bus = iface->busid;
74 cmd.info.devaddr = (addr << 1) | (read ? 0x01 : 0x00);
75
76	/* Prepare data & select mode */
77 switch (size) {
78 case I2C_SMBUS_QUICK:
79 cmd.info.type = SMU_I2C_TRANSFER_SIMPLE;
80 cmd.info.datalen = 0;
81 break;
82 case I2C_SMBUS_BYTE:
83 cmd.info.type = SMU_I2C_TRANSFER_SIMPLE;
84 cmd.info.datalen = 1;
85 if (!read)
86 cmd.info.data[0] = data->byte;
87 break;
88 case I2C_SMBUS_BYTE_DATA:
89 cmd.info.type = SMU_I2C_TRANSFER_STDSUB;
90 cmd.info.datalen = 1;
91 cmd.info.sublen = 1;
92 cmd.info.subaddr[0] = command;
93 cmd.info.subaddr[1] = 0;
94 cmd.info.subaddr[2] = 0;
95 if (!read)
96 cmd.info.data[0] = data->byte;
97 break;
98 case I2C_SMBUS_WORD_DATA:
99 cmd.info.type = SMU_I2C_TRANSFER_STDSUB;
100 cmd.info.datalen = 2;
101 cmd.info.sublen = 1;
102 cmd.info.subaddr[0] = command;
103 cmd.info.subaddr[1] = 0;
104 cmd.info.subaddr[2] = 0;
105 if (!read) {
106 cmd.info.data[0] = data->byte & 0xff;
107 cmd.info.data[1] = (data->byte >> 8) & 0xff;
108 }
109 break;
110 /* Note that these are broken vs. the expected smbus API where
111	 * on reads, the length is actually returned from the function,
112 * but I think the current API makes no sense and I don't want
113 * any driver that I haven't verified for correctness to go
114 * anywhere near a pmac i2c bus anyway ...
115 */
116 case I2C_SMBUS_BLOCK_DATA:
117 cmd.info.type = SMU_I2C_TRANSFER_STDSUB;
118 cmd.info.datalen = data->block[0] + 1;
119 if (cmd.info.datalen > 6)
120 return -EINVAL;
121 if (!read)
122 memcpy(cmd.info.data, data->block, cmd.info.datalen);
123 cmd.info.sublen = 1;
124 cmd.info.subaddr[0] = command;
125 cmd.info.subaddr[1] = 0;
126 cmd.info.subaddr[2] = 0;
127 break;
128 case I2C_SMBUS_I2C_BLOCK_DATA:
129 cmd.info.type = SMU_I2C_TRANSFER_STDSUB;
130 cmd.info.datalen = data->block[0];
131 if (cmd.info.datalen > 7)
132 return -EINVAL;
133 if (!read)
134 memcpy(cmd.info.data, &data->block[1],
135 cmd.info.datalen);
136 cmd.info.sublen = 1;
137 cmd.info.subaddr[0] = command;
138 cmd.info.subaddr[1] = 0;
139 cmd.info.subaddr[2] = 0;
140 break;
141
142 default:
143 return -EINVAL;
144 }
145
146 /* Turn a standardsub read into a combined mode access */
147 if (read_write == I2C_SMBUS_READ &&
148 cmd.info.type == SMU_I2C_TRANSFER_STDSUB)
149 cmd.info.type = SMU_I2C_TRANSFER_COMBINED;
150
151 /* Finish filling command and submit it */
152 cmd.done = smu_i2c_done;
153 cmd.misc = iface;
154 rc = smu_queue_i2c(&cmd);
155 if (rc < 0)
156 return rc;
157 wait_for_completion(&iface->complete);
158 rc = cmd.status;
159
160 if (!read || rc < 0)
161 return rc;
162
163 switch (size) {
164 case I2C_SMBUS_BYTE:
165 case I2C_SMBUS_BYTE_DATA:
166 data->byte = cmd.info.data[0];
167 break;
168 case I2C_SMBUS_WORD_DATA:
169 data->word = ((u16)cmd.info.data[1]) << 8;
170 data->word |= cmd.info.data[0];
171 break;
172 /* Note that these are broken vs. the expected smbus API where
173	 * on reads, the length is actually returned from the function,
174 * but I think the current API makes no sense and I don't want
175 * any driver that I haven't verified for correctness to go
176 * anywhere near a pmac i2c bus anyway ...
177 */
178 case I2C_SMBUS_BLOCK_DATA:
179 case I2C_SMBUS_I2C_BLOCK_DATA:
180 memcpy(&data->block[0], cmd.info.data, cmd.info.datalen);
181 break;
182 }
183
184 return rc;
185}
186
187static u32
188smu_smbus_func(struct i2c_adapter * adapter)
189{
190 return I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SMBUS_BYTE |
191 I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA |
192 I2C_FUNC_SMBUS_BLOCK_DATA;
193}
194
195/* For now, we only handle combined mode (smbus) */
196static struct i2c_algorithm smu_algorithm = {
197 .smbus_xfer = smu_smbus_xfer,
198 .functionality = smu_smbus_func,
199};
200
201static int create_iface(struct device_node *np, struct device *dev)
202{
203 struct smu_iface* iface;
204 u32 *reg, busid;
205 int rc;
206
207 reg = (u32 *)get_property(np, "reg", NULL);
208 if (reg == NULL) {
209 printk(KERN_ERR "i2c-pmac-smu: can't find bus number !\n");
210 return -ENXIO;
211 }
212 busid = *reg;
213
214 iface = kmalloc(sizeof(struct smu_iface), GFP_KERNEL);
215 if (iface == NULL) {
216		printk(KERN_ERR "i2c-pmac-smu: can't allocate interface !\n");
217 return -ENOMEM;
218 }
219 memset(iface, 0, sizeof(struct smu_iface));
220 init_completion(&iface->complete);
221 iface->busid = busid;
222
223 dev_set_drvdata(dev, iface);
224
225 sprintf(iface->adapter.name, "smu-i2c-%02x", busid);
226 iface->adapter.algo = &smu_algorithm;
227 iface->adapter.algo_data = NULL;
228 iface->adapter.client_register = NULL;
229 iface->adapter.client_unregister = NULL;
230 i2c_set_adapdata(&iface->adapter, iface);
231 iface->adapter.dev.parent = dev;
232
233 rc = i2c_add_adapter(&iface->adapter);
234 if (rc) {
235		printk(KERN_ERR "i2c-pmac-smu.c: Adapter %s registration "
236 "failed\n", iface->adapter.name);
237 i2c_set_adapdata(&iface->adapter, NULL);
238 }
239
240 if (probe) {
241 unsigned char addr;
242 printk("Probe: ");
243 for (addr = 0x00; addr <= 0x7f; addr++) {
244 if (i2c_smbus_xfer(&iface->adapter,addr,
245 0,0,0,I2C_SMBUS_QUICK,NULL) >= 0)
246 printk("%02x ", addr);
247 }
248 printk("\n");
249 }
250
251 printk(KERN_INFO "SMU i2c bus %x registered\n", busid);
252
253 return 0;
254}
255
256static int dispose_iface(struct device *dev)
257{
258 struct smu_iface *iface = dev_get_drvdata(dev);
259 int rc;
260
261 rc = i2c_del_adapter(&iface->adapter);
262 i2c_set_adapdata(&iface->adapter, NULL);
263 /* We aren't that prepared to deal with this... */
264 if (rc)
265 printk("i2c-pmac-smu.c: Failed to remove bus %s !\n",
266 iface->adapter.name);
267 dev_set_drvdata(dev, NULL);
268 kfree(iface);
269
270 return 0;
271}
272
273
274static int create_iface_of_platform(struct of_device* dev,
275 const struct of_device_id *match)
276{
277 return create_iface(dev->node, &dev->dev);
278}
279
280
281static int dispose_iface_of_platform(struct of_device* dev)
282{
283 return dispose_iface(&dev->dev);
284}
285
286
287static struct of_device_id i2c_smu_match[] =
288{
289 {
290 .compatible = "smu-i2c",
291 },
292 {},
293};
294static struct of_platform_driver i2c_smu_of_platform_driver =
295{
296 .name = "i2c-smu",
297 .match_table = i2c_smu_match,
298 .probe = create_iface_of_platform,
299 .remove = dispose_iface_of_platform
300};
301
302
303static int __init i2c_pmac_smu_init(void)
304{
305 of_register_driver(&i2c_smu_of_platform_driver);
306 return 0;
307}
308
309
310static void __exit i2c_pmac_smu_cleanup(void)
311{
312 of_unregister_driver(&i2c_smu_of_platform_driver);
313}
314
315module_init(i2c_pmac_smu_init);
316module_exit(i2c_pmac_smu_cleanup);
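The new adapter only implements SMBus-style transfers (smu_smbus_xfer), so in-kernel clients reach it through the usual i2c_smbus_* helpers, and from user space the same operations are reachable through the i2c-dev interface. A hedged userspace sketch of a byte-data read against such a bus; the bus number, slave address and register are made up, and the exact header layout for the SMBus ioctl varies between distributions:

	#include <stdio.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/i2c.h>
	#include <linux/i2c-dev.h>

	int main(void)
	{
		int fd = open("/dev/i2c-0", O_RDWR);	/* hypothetical bus number */
		if (fd < 0) { perror("open"); return 1; }

		if (ioctl(fd, I2C_SLAVE, 0x48) < 0) {	/* hypothetical slave address */
			perror("I2C_SLAVE");
			return 1;
		}

		/* SMBus "read byte data": maps to I2C_SMBUS_BYTE_DATA in the driver */
		union i2c_smbus_data data;
		struct i2c_smbus_ioctl_data args = {
			.read_write = I2C_SMBUS_READ,
			.command    = 0x00,		/* hypothetical register */
			.size       = I2C_SMBUS_BYTE_DATA,
			.data       = &data,
		};
		if (ioctl(fd, I2C_SMBUS, &args) < 0)
			perror("I2C_SMBUS");
		else
			printf("reg 0x00 = 0x%02x\n", data.byte);

		close(fd);
		return 0;
	}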
diff --git a/drivers/ide/legacy/ide-cs.c b/drivers/ide/legacy/ide-cs.c
index 0ccf85fcee34..a35a58bef1a4 100644
--- a/drivers/ide/legacy/ide-cs.c
+++ b/drivers/ide/legacy/ide-cs.c
@@ -477,7 +477,7 @@ static struct pcmcia_device_id ide_ids[] = {
477 PCMCIA_DEVICE_PROD_ID12("IO DATA", "PCIDE", 0x547e66dc, 0x5c5ab149), 477 PCMCIA_DEVICE_PROD_ID12("IO DATA", "PCIDE", 0x547e66dc, 0x5c5ab149),
478 PCMCIA_DEVICE_PROD_ID12("IO DATA", "PCIDEII", 0x547e66dc, 0xb3662674), 478 PCMCIA_DEVICE_PROD_ID12("IO DATA", "PCIDEII", 0x547e66dc, 0xb3662674),
479 PCMCIA_DEVICE_PROD_ID12("LOOKMEET", "CBIDE2 ", 0xe37be2b5, 0x8671043b), 479 PCMCIA_DEVICE_PROD_ID12("LOOKMEET", "CBIDE2 ", 0xe37be2b5, 0x8671043b),
480 PCMCIA_DEVICE_PROD_ID12(" ", "NinjaATA-", 0x3b6e20c8, 0xebe0bd79), 480 PCMCIA_DEVICE_PROD_ID2("NinjaATA-", 0xebe0bd79),
481 PCMCIA_DEVICE_PROD_ID12("PCMCIA", "CD-ROM", 0x281f1c5d, 0x66536591), 481 PCMCIA_DEVICE_PROD_ID12("PCMCIA", "CD-ROM", 0x281f1c5d, 0x66536591),
482 PCMCIA_DEVICE_PROD_ID12("PCMCIA", "PnPIDE", 0x281f1c5d, 0x0c694728), 482 PCMCIA_DEVICE_PROD_ID12("PCMCIA", "PnPIDE", 0x281f1c5d, 0x0c694728),
483 PCMCIA_DEVICE_PROD_ID12("SHUTTLE TECHNOLOGY LTD.", "PCCARD-IDE/ATAPI Adapter", 0x4a3f0ba0, 0x322560e1), 483 PCMCIA_DEVICE_PROD_ID12("SHUTTLE TECHNOLOGY LTD.", "PCCARD-IDE/ATAPI Adapter", 0x4a3f0ba0, 0x322560e1),
diff --git a/drivers/ieee1394/amdtp.c b/drivers/ieee1394/amdtp.c
index 84ae027b021a..e8e28569a668 100644
--- a/drivers/ieee1394/amdtp.c
+++ b/drivers/ieee1394/amdtp.c
@@ -1297,4 +1297,3 @@ static void __exit amdtp_exit_module (void)
1297 1297
1298module_init(amdtp_init_module); 1298module_init(amdtp_init_module);
1299module_exit(amdtp_exit_module); 1299module_exit(amdtp_exit_module);
1300MODULE_ALIAS_CHARDEV(IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_AMDTP * 16);
diff --git a/drivers/ieee1394/csr1212.h b/drivers/ieee1394/csr1212.h
index e6734263a1d3..28c5f4b726e2 100644
--- a/drivers/ieee1394/csr1212.h
+++ b/drivers/ieee1394/csr1212.h
@@ -37,7 +37,6 @@
37#include <linux/types.h> 37#include <linux/types.h>
38#include <linux/slab.h> 38#include <linux/slab.h>
39#include <linux/interrupt.h> 39#include <linux/interrupt.h>
40#include <linux/sched.h>
41#include <linux/vmalloc.h> 40#include <linux/vmalloc.h>
42#include <asm/pgalloc.h> 41#include <asm/pgalloc.h>
43 42
diff --git a/drivers/ieee1394/dv1394.c b/drivers/ieee1394/dv1394.c
index 4538b0235ca3..e34730c7a874 100644
--- a/drivers/ieee1394/dv1394.c
+++ b/drivers/ieee1394/dv1394.c
@@ -2660,4 +2660,3 @@ static int __init dv1394_init_module(void)
2660 2660
2661module_init(dv1394_init_module); 2661module_init(dv1394_init_module);
2662module_exit(dv1394_exit_module); 2662module_exit(dv1394_exit_module);
2663MODULE_ALIAS_CHARDEV(IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_DV1394 * 16);
diff --git a/drivers/ieee1394/eth1394.c b/drivers/ieee1394/eth1394.c
index cd53c174ced1..4802bbbb6dc9 100644
--- a/drivers/ieee1394/eth1394.c
+++ b/drivers/ieee1394/eth1394.c
@@ -89,7 +89,7 @@
89#define TRACE() printk(KERN_ERR "%s:%s[%d] ---- TRACE\n", driver_name, __FUNCTION__, __LINE__) 89#define TRACE() printk(KERN_ERR "%s:%s[%d] ---- TRACE\n", driver_name, __FUNCTION__, __LINE__)
90 90
91static char version[] __devinitdata = 91static char version[] __devinitdata =
92 "$Rev: 1264 $ Ben Collins <bcollins@debian.org>"; 92 "$Rev: 1312 $ Ben Collins <bcollins@debian.org>";
93 93
94struct fragment_info { 94struct fragment_info {
95 struct list_head list; 95 struct list_head list;
@@ -221,9 +221,7 @@ static int ether1394_open (struct net_device *dev)
221 if (priv->bc_state == ETHER1394_BC_ERROR) { 221 if (priv->bc_state == ETHER1394_BC_ERROR) {
222 /* we'll try again */ 222 /* we'll try again */
223 priv->iso = hpsb_iso_recv_init(priv->host, 223 priv->iso = hpsb_iso_recv_init(priv->host,
224 ETHER1394_GASP_BUFFERS * 2 * 224 ETHER1394_ISO_BUF_SIZE,
225 (1 << (priv->host->csr.max_rec +
226 1)),
227 ETHER1394_GASP_BUFFERS, 225 ETHER1394_GASP_BUFFERS,
228 priv->broadcast_channel, 226 priv->broadcast_channel,
229 HPSB_ISO_DMA_PACKET_PER_BUFFER, 227 HPSB_ISO_DMA_PACKET_PER_BUFFER,
@@ -635,8 +633,8 @@ static void ether1394_add_host (struct hpsb_host *host)
635 * be checked when the eth device is opened. */ 633 * be checked when the eth device is opened. */
636 priv->broadcast_channel = host->csr.broadcast_channel & 0x3f; 634 priv->broadcast_channel = host->csr.broadcast_channel & 0x3f;
637 635
638 priv->iso = hpsb_iso_recv_init(host, (ETHER1394_GASP_BUFFERS * 2 * 636 priv->iso = hpsb_iso_recv_init(host,
639 (1 << (host->csr.max_rec + 1))), 637 ETHER1394_ISO_BUF_SIZE,
640 ETHER1394_GASP_BUFFERS, 638 ETHER1394_GASP_BUFFERS,
641 priv->broadcast_channel, 639 priv->broadcast_channel,
642 HPSB_ISO_DMA_PACKET_PER_BUFFER, 640 HPSB_ISO_DMA_PACKET_PER_BUFFER,
@@ -1770,7 +1768,7 @@ fail:
1770static void ether1394_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) 1768static void ether1394_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1771{ 1769{
1772 strcpy (info->driver, driver_name); 1770 strcpy (info->driver, driver_name);
1773 strcpy (info->version, "$Rev: 1264 $"); 1771 strcpy (info->version, "$Rev: 1312 $");
1774 /* FIXME XXX provide sane businfo */ 1772 /* FIXME XXX provide sane businfo */
1775 strcpy (info->bus_info, "ieee1394"); 1773 strcpy (info->bus_info, "ieee1394");
1776} 1774}
diff --git a/drivers/ieee1394/eth1394.h b/drivers/ieee1394/eth1394.h
index ed8f1c4b7fd8..a77213cfc483 100644
--- a/drivers/ieee1394/eth1394.h
+++ b/drivers/ieee1394/eth1394.h
@@ -44,6 +44,12 @@
44 44
45#define ETHER1394_GASP_BUFFERS 16 45#define ETHER1394_GASP_BUFFERS 16
46 46
47/* rawiso buffer size - due to a limitation in rawiso, we must limit each
48 * GASP buffer to be less than PAGE_SIZE. */
49#define ETHER1394_ISO_BUF_SIZE ETHER1394_GASP_BUFFERS * \
50 min((unsigned int)PAGE_SIZE, \
51 2 * (1U << (priv->host->csr.max_rec + 1)))
52
47/* Node set == 64 */ 53/* Node set == 64 */
48#define NODE_SET (ALL_NODES + 1) 54#define NODE_SET (ALL_NODES + 1)
49 55
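
The new ETHER1394_ISO_BUF_SIZE macro caps each GASP buffer at PAGE_SIZE because the rawiso layer cannot take buffers larger than a page. Note that the macro reaches into the caller's `priv` and is not parenthesized as a whole expression, so it only works where the hunks above use it. A self-contained sketch of the same computation as a plain C function follows; the max_rec value in main() is an arbitrary example, not taken from the driver.

#include <stdio.h>

#define PAGE_SIZE     4096u
#define GASP_BUFFERS  16u

/* Cap each GASP buffer at PAGE_SIZE, mirroring the driver's intent.
 * max_rec is the CSR max_rec field: the largest payload is
 * 2^(max_rec + 1) bytes, and the driver sizes each buffer at twice that. */
static unsigned int iso_buf_size(unsigned int max_rec)
{
        unsigned int per_buf = 2u * (1u << (max_rec + 1));

        if (per_buf > PAGE_SIZE)
                per_buf = PAGE_SIZE;
        return GASP_BUFFERS * per_buf;
}

int main(void)
{
        printf("%u bytes\n", iso_buf_size(10));   /* example max_rec only */
        return 0;
}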
diff --git a/drivers/ieee1394/hosts.c b/drivers/ieee1394/hosts.c
index c502c6e9c440..aeeaeb670d03 100644
--- a/drivers/ieee1394/hosts.c
+++ b/drivers/ieee1394/hosts.c
@@ -18,6 +18,7 @@
18#include <linux/slab.h> 18#include <linux/slab.h>
19#include <linux/pci.h> 19#include <linux/pci.h>
20#include <linux/timer.h> 20#include <linux/timer.h>
21#include <linux/jiffies.h>
21 22
22#include "csr1212.h" 23#include "csr1212.h"
23#include "ieee1394.h" 24#include "ieee1394.h"
@@ -217,7 +218,7 @@ int hpsb_update_config_rom_image(struct hpsb_host *host)
217 218
218 /* IEEE 1394a-2000 prohibits using the same generation number 219 /* IEEE 1394a-2000 prohibits using the same generation number
219 * twice in a 60 second period. */ 220 * twice in a 60 second period. */
220 if (jiffies - host->csr.gen_timestamp[next_gen] < 60 * HZ) 221 if (time_before(jiffies, host->csr.gen_timestamp[next_gen] + 60 * HZ))
221 /* Wait 60 seconds from the last time this generation number was 222 /* Wait 60 seconds from the last time this generation number was
222 * used. */ 223 * used. */
223 reset_delay = (60 * HZ) + host->csr.gen_timestamp[next_gen] - jiffies; 224 reset_delay = (60 * HZ) + host->csr.gen_timestamp[next_gen] - jiffies;
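
The hosts.c hunk replaces an open-coded "jiffies - timestamp < 60 * HZ" test with time_before(), which stays correct when the jiffies counter wraps. The kernel macros boil down to looking at the unsigned difference as a signed value; a minimal userspace sketch of the same idea, assuming nothing but a 32-bit tick counter:

#include <stdio.h>

typedef unsigned int tick_t;

/* Wrap-safe "a is before b": take the difference in unsigned arithmetic
 * and interpret it as signed, the same trick time_before()/time_after()
 * use in <linux/jiffies.h>. */
static int tick_before(tick_t a, tick_t b)
{
        return (int)(a - b) < 0;
}

int main(void)
{
        tick_t now = 0xfffffff0u;       /* about to wrap */
        tick_t deadline = now + 100;    /* wraps past zero */

        printf("naive:     %d\n", now < deadline);            /* 0: wrong */
        printf("wrap-safe: %d\n", tick_before(now, deadline)); /* 1: right */
        return 0;
}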
diff --git a/drivers/ieee1394/hosts.h b/drivers/ieee1394/hosts.h
index 739e76840d51..38f42112dff0 100644
--- a/drivers/ieee1394/hosts.h
+++ b/drivers/ieee1394/hosts.h
@@ -135,17 +135,17 @@ enum isoctl_cmd {
135 135
136enum reset_types { 136enum reset_types {
137 /* 166 microsecond reset -- only type of reset available on 137 /* 166 microsecond reset -- only type of reset available on
138 non-1394a capable IEEE 1394 controllers */ 138 non-1394a capable controllers */
139 LONG_RESET, 139 LONG_RESET,
140 140
141 /* Short (arbitrated) reset -- only available on 1394a capable 141 /* Short (arbitrated) reset -- only available on 1394a capable
142 IEEE 1394 capable controllers */ 142 controllers */
143 SHORT_RESET, 143 SHORT_RESET,
144 144
145 /* Variants, that set force_root before issueing the bus reset */ 145 /* Variants that set force_root before issueing the bus reset */
146 LONG_RESET_FORCE_ROOT, SHORT_RESET_FORCE_ROOT, 146 LONG_RESET_FORCE_ROOT, SHORT_RESET_FORCE_ROOT,
147 147
148 /* Variants, that clear force_root before issueing the bus reset */ 148 /* Variants that clear force_root before issueing the bus reset */
149 LONG_RESET_NO_FORCE_ROOT, SHORT_RESET_NO_FORCE_ROOT 149 LONG_RESET_NO_FORCE_ROOT, SHORT_RESET_NO_FORCE_ROOT
150}; 150};
151 151
diff --git a/drivers/ieee1394/ieee1394_core.c b/drivers/ieee1394/ieee1394_core.c
index d633770fac8e..32a1e016c85e 100644
--- a/drivers/ieee1394/ieee1394_core.c
+++ b/drivers/ieee1394/ieee1394_core.c
@@ -70,7 +70,7 @@ const char *hpsb_speedto_str[] = { "S100", "S200", "S400", "S800", "S1600", "S32
70struct class *hpsb_protocol_class; 70struct class *hpsb_protocol_class;
71 71
72#ifdef CONFIG_IEEE1394_VERBOSEDEBUG 72#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
73static void dump_packet(const char *text, quadlet_t *data, int size) 73static void dump_packet(const char *text, quadlet_t *data, int size, int speed)
74{ 74{
75 int i; 75 int i;
76 76
@@ -78,12 +78,15 @@ static void dump_packet(const char *text, quadlet_t *data, int size)
78 size = (size > 4 ? 4 : size); 78 size = (size > 4 ? 4 : size);
79 79
80 printk(KERN_DEBUG "ieee1394: %s", text); 80 printk(KERN_DEBUG "ieee1394: %s", text);
81 if (speed > -1 && speed < 6)
82 printk(" at %s", hpsb_speedto_str[speed]);
83 printk(":");
81 for (i = 0; i < size; i++) 84 for (i = 0; i < size; i++)
82 printk(" %08x", data[i]); 85 printk(" %08x", data[i]);
83 printk("\n"); 86 printk("\n");
84} 87}
85#else 88#else
86#define dump_packet(x,y,z) 89#define dump_packet(a,b,c,d)
87#endif 90#endif
88 91
89static void abort_requests(struct hpsb_host *host); 92static void abort_requests(struct hpsb_host *host);
@@ -544,8 +547,7 @@ int hpsb_send_packet(struct hpsb_packet *packet)
544 if (packet->data_size) 547 if (packet->data_size)
545 memcpy(((u8*)data) + packet->header_size, packet->data, packet->data_size); 548 memcpy(((u8*)data) + packet->header_size, packet->data, packet->data_size);
546 549
547 dump_packet("send packet local:", packet->header, 550 dump_packet("send packet local", packet->header, packet->header_size, -1);
548 packet->header_size);
549 551
550 hpsb_packet_sent(host, packet, packet->expect_response ? ACK_PENDING : ACK_COMPLETE); 552 hpsb_packet_sent(host, packet, packet->expect_response ? ACK_PENDING : ACK_COMPLETE);
551 hpsb_packet_received(host, data, size, 0); 553 hpsb_packet_received(host, data, size, 0);
@@ -561,21 +563,7 @@ int hpsb_send_packet(struct hpsb_packet *packet)
561 + NODEID_TO_NODE(packet->node_id)]; 563 + NODEID_TO_NODE(packet->node_id)];
562 } 564 }
563 565
564#ifdef CONFIG_IEEE1394_VERBOSEDEBUG 566 dump_packet("send packet", packet->header, packet->header_size, packet->speed_code);
565 switch (packet->speed_code) {
566 case 2:
567 dump_packet("send packet 400:", packet->header,
568 packet->header_size);
569 break;
570 case 1:
571 dump_packet("send packet 200:", packet->header,
572 packet->header_size);
573 break;
574 default:
575 dump_packet("send packet 100:", packet->header,
576 packet->header_size);
577 }
578#endif
579 567
580 return host->driver->transmit_packet(host, packet); 568 return host->driver->transmit_packet(host, packet);
581} 569}
@@ -636,7 +624,7 @@ static void handle_packet_response(struct hpsb_host *host, int tcode,
636 624
637 if (packet == NULL) { 625 if (packet == NULL) {
638 HPSB_DEBUG("unsolicited response packet received - no tlabel match"); 626 HPSB_DEBUG("unsolicited response packet received - no tlabel match");
639 dump_packet("contents:", data, 16); 627 dump_packet("contents", data, 16, -1);
640 spin_unlock_irqrestore(&host->pending_packet_queue.lock, flags); 628 spin_unlock_irqrestore(&host->pending_packet_queue.lock, flags);
641 return; 629 return;
642 } 630 }
@@ -677,7 +665,7 @@ static void handle_packet_response(struct hpsb_host *host, int tcode,
677 if (!tcode_match) { 665 if (!tcode_match) {
678 spin_unlock_irqrestore(&host->pending_packet_queue.lock, flags); 666 spin_unlock_irqrestore(&host->pending_packet_queue.lock, flags);
679 HPSB_INFO("unsolicited response packet received - tcode mismatch"); 667 HPSB_INFO("unsolicited response packet received - tcode mismatch");
680 dump_packet("contents:", data, 16); 668 dump_packet("contents", data, 16, -1);
681 return; 669 return;
682 } 670 }
683 671
@@ -914,7 +902,7 @@ void hpsb_packet_received(struct hpsb_host *host, quadlet_t *data, size_t size,
914 return; 902 return;
915 } 903 }
916 904
917 dump_packet("received packet:", data, size); 905 dump_packet("received packet", data, size, -1);
918 906
919 tcode = (data[0] >> 4) & 0xf; 907 tcode = (data[0] >> 4) & 0xf;
920 908
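
The ieee1394_core.c change folds the three speed-specific dump_packet() call sites into one by passing the speed code (or -1 when it is unknown) and letting the helper decide whether to print it. A self-contained sketch of that pattern, using plain printf instead of printk and invented data:

#include <stdio.h>

static const char *speed_str[] = {
        "S100", "S200", "S400", "S800", "S1600", "S3200"
};

/* Dump up to four quadlets; print the speed only when the caller
 * actually knows it (speed >= 0), otherwise leave it out. */
static void dump_packet(const char *text, const unsigned int *data,
                        int size, int speed)
{
        int i;

        size /= 4;
        if (size > 4)
                size = 4;

        printf("%s", text);
        if (speed > -1 && speed < 6)
                printf(" at %s", speed_str[speed]);
        printf(":");
        for (i = 0; i < size; i++)
                printf(" %08x", data[i]);
        printf("\n");
}

int main(void)
{
        unsigned int hdr[4] = { 0x12345678, 0x9abcdef0, 0, 0 };

        dump_packet("send packet", hdr, sizeof(hdr), 2);       /* known speed */
        dump_packet("received packet", hdr, sizeof(hdr), -1);  /* unknown */
        return 0;
}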
diff --git a/drivers/ieee1394/nodemgr.c b/drivers/ieee1394/nodemgr.c
index b23322523ef5..347ece6b583c 100644
--- a/drivers/ieee1394/nodemgr.c
+++ b/drivers/ieee1394/nodemgr.c
@@ -64,10 +64,10 @@ static int nodemgr_bus_read(struct csr1212_csr *csr, u64 addr, u16 length,
64 struct nodemgr_csr_info *ci = (struct nodemgr_csr_info*)__ci; 64 struct nodemgr_csr_info *ci = (struct nodemgr_csr_info*)__ci;
65 int i, ret = 0; 65 int i, ret = 0;
66 66
67 for (i = 0; i < 3; i++) { 67 for (i = 1; ; i++) {
68 ret = hpsb_read(ci->host, ci->nodeid, ci->generation, addr, 68 ret = hpsb_read(ci->host, ci->nodeid, ci->generation, addr,
69 buffer, length); 69 buffer, length);
70 if (!ret) 70 if (!ret || i == 3)
71 break; 71 break;
72 72
73 if (msleep_interruptible(334)) 73 if (msleep_interruptible(334))
@@ -1438,9 +1438,13 @@ static int nodemgr_do_irm_duties(struct hpsb_host *host, int cycles)
1438 if (host->busmgr_id == 0xffff && host->node_count > 1) 1438 if (host->busmgr_id == 0xffff && host->node_count > 1)
1439 { 1439 {
1440 u16 root_node = host->node_count - 1; 1440 u16 root_node = host->node_count - 1;
1441 struct node_entry *ne = find_entry_by_nodeid(host, root_node | LOCAL_BUS);
1442 1441
1443 if (ne && ne->busopt.cmc) 1442 /* get cycle master capability flag from root node */
1443 if (host->is_cycmst ||
1444 (!hpsb_read(host, LOCAL_BUS | root_node, get_hpsb_generation(host),
1445 (CSR_REGISTER_BASE + CSR_CONFIG_ROM + 2 * sizeof(quadlet_t)),
1446 &bc, sizeof(quadlet_t)) &&
1447 be32_to_cpu(bc) & 1 << CSR_CMC_SHIFT))
1444 hpsb_send_phy_config(host, root_node, -1); 1448 hpsb_send_phy_config(host, root_node, -1);
1445 else { 1449 else {
1446 HPSB_DEBUG("The root node is not cycle master capable; " 1450 HPSB_DEBUG("The root node is not cycle master capable; "
@@ -1557,24 +1561,19 @@ static int nodemgr_host_thread(void *__hi)
1557 } 1561 }
1558 } 1562 }
1559 1563
1560 if (!nodemgr_check_irm_capability(host, reset_cycles)) { 1564 if (!nodemgr_check_irm_capability(host, reset_cycles) ||
1565 !nodemgr_do_irm_duties(host, reset_cycles)) {
1561 reset_cycles++; 1566 reset_cycles++;
1562 up(&nodemgr_serialize); 1567 up(&nodemgr_serialize);
1563 continue; 1568 continue;
1564 } 1569 }
1570 reset_cycles = 0;
1565 1571
1566 /* Scan our nodes to get the bus options and create node 1572 /* Scan our nodes to get the bus options and create node
1567 * entries. This does not do the sysfs stuff, since that 1573 * entries. This does not do the sysfs stuff, since that
1568 * would trigger hotplug callbacks and such, which is a 1574 * would trigger hotplug callbacks and such, which is a
1569 * bad idea at this point. */ 1575 * bad idea at this point. */
1570 nodemgr_node_scan(hi, generation); 1576 nodemgr_node_scan(hi, generation);
1571 if (!nodemgr_do_irm_duties(host, reset_cycles)) {
1572 reset_cycles++;
1573 up(&nodemgr_serialize);
1574 continue;
1575 }
1576
1577 reset_cycles = 0;
1578 1577
1579 /* This actually does the full probe, with sysfs 1578 /* This actually does the full probe, with sysfs
1580 * registration. */ 1579 * registration. */
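
The nodemgr_bus_read() hunk rewrites the retry loop so the last failed attempt is no longer followed by a pointless sleep: the attempt counter starts at 1 and the loop exits either on success or after the third try. A runnable sketch of the same bounded-retry shape, with a stubbed-out read and usleep() standing in for msleep_interruptible():

#include <stdio.h>
#include <unistd.h>

#define MAX_TRIES 3

/* Stand-in for the bus read: fail twice, then succeed. */
static int fake_read(int attempt)
{
        return attempt < 3 ? -1 : 0;
}

int main(void)
{
        int i, ret;

        for (i = 1; ; i++) {
                ret = fake_read(i);
                if (!ret || i == MAX_TRIES)
                        break;              /* success, or out of tries */
                usleep(334 * 1000);         /* back off before retrying */
        }
        printf("finished after %d attempt(s), ret=%d\n", i, ret);
        return 0;
}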
diff --git a/drivers/ieee1394/ohci1394.c b/drivers/ieee1394/ohci1394.c
index 27018c8efc24..6a6acbd80af4 100644
--- a/drivers/ieee1394/ohci1394.c
+++ b/drivers/ieee1394/ohci1394.c
@@ -162,7 +162,7 @@ printk(level "%s: " fmt "\n" , OHCI1394_DRIVER_NAME , ## args)
162printk(level "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args) 162printk(level "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args)
163 163
164static char version[] __devinitdata = 164static char version[] __devinitdata =
165 "$Rev: 1299 $ Ben Collins <bcollins@debian.org>"; 165 "$Rev: 1313 $ Ben Collins <bcollins@debian.org>";
166 166
167/* Module Parameters */ 167/* Module Parameters */
168static int phys_dma = 1; 168static int phys_dma = 1;
@@ -1084,7 +1084,7 @@ static int ohci_devctl(struct hpsb_host *host, enum devctl_cmd cmd, int arg)
1084 initialize_dma_rcv_ctx(&ohci->ir_legacy_context, 1); 1084 initialize_dma_rcv_ctx(&ohci->ir_legacy_context, 1);
1085 1085
1086 if (printk_ratelimit()) 1086 if (printk_ratelimit())
1087 PRINT(KERN_ERR, "IR legacy activated"); 1087 DBGMSG("IR legacy activated");
1088 } 1088 }
1089 1089
1090 spin_lock_irqsave(&ohci->IR_channel_lock, flags); 1090 spin_lock_irqsave(&ohci->IR_channel_lock, flags);
diff --git a/drivers/ieee1394/raw1394.c b/drivers/ieee1394/raw1394.c
index b4fa14793fe5..5fe4f2ba0979 100644
--- a/drivers/ieee1394/raw1394.c
+++ b/drivers/ieee1394/raw1394.c
@@ -2958,4 +2958,3 @@ static void __exit cleanup_raw1394(void)
2958module_init(init_raw1394); 2958module_init(init_raw1394);
2959module_exit(cleanup_raw1394); 2959module_exit(cleanup_raw1394);
2960MODULE_LICENSE("GPL"); 2960MODULE_LICENSE("GPL");
2961MODULE_ALIAS_CHARDEV(IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_RAW1394 * 16);
diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c
index de88218ef7cc..12cec7c4a342 100644
--- a/drivers/ieee1394/sbp2.c
+++ b/drivers/ieee1394/sbp2.c
@@ -97,16 +97,18 @@ static char version[] __devinitdata =
97 */ 97 */
98static int max_speed = IEEE1394_SPEED_MAX; 98static int max_speed = IEEE1394_SPEED_MAX;
99module_param(max_speed, int, 0644); 99module_param(max_speed, int, 0644);
100MODULE_PARM_DESC(max_speed, "Force max speed (3 = 800mb, 2 = 400mb default, 1 = 200mb, 0 = 100mb)"); 100MODULE_PARM_DESC(max_speed, "Force max speed (3 = 800mb, 2 = 400mb, 1 = 200mb, 0 = 100mb)");
101 101
102/* 102/*
103 * Set serialize_io to 1 if you'd like only one scsi command sent 103 * Set serialize_io to 1 if you'd like only one scsi command sent
104 * down to us at a time (debugging). This might be necessary for very 104 * down to us at a time (debugging). This might be necessary for very
105 * badly behaved sbp2 devices. 105 * badly behaved sbp2 devices.
106 *
107 * TODO: Make this configurable per device.
106 */ 108 */
107static int serialize_io; 109static int serialize_io = 1;
108module_param(serialize_io, int, 0444); 110module_param(serialize_io, int, 0444);
109MODULE_PARM_DESC(serialize_io, "Serialize all I/O coming down from the scsi drivers (default = 0)"); 111MODULE_PARM_DESC(serialize_io, "Serialize I/O coming from scsi drivers (default = 1, faster = 0)");
110 112
111/* 113/*
112 * Bump up max_sectors if you'd like to support very large sized 114 * Bump up max_sectors if you'd like to support very large sized
@@ -596,6 +598,14 @@ static void sbp2util_mark_command_completed(struct scsi_id_instance_data *scsi_i
596 spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags); 598 spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags);
597} 599}
598 600
601/*
602 * Is scsi_id valid? Is the 1394 node still present?
603 */
604static inline int sbp2util_node_is_available(struct scsi_id_instance_data *scsi_id)
605{
606 return scsi_id && scsi_id->ne && !scsi_id->ne->in_limbo;
607}
608
599 609
600 610
601/********************************************* 611/*********************************************
@@ -631,11 +641,23 @@ static int sbp2_remove(struct device *dev)
631{ 641{
632 struct unit_directory *ud; 642 struct unit_directory *ud;
633 struct scsi_id_instance_data *scsi_id; 643 struct scsi_id_instance_data *scsi_id;
644 struct scsi_device *sdev;
634 645
635 SBP2_DEBUG("sbp2_remove"); 646 SBP2_DEBUG("sbp2_remove");
636 647
637 ud = container_of(dev, struct unit_directory, device); 648 ud = container_of(dev, struct unit_directory, device);
638 scsi_id = ud->device.driver_data; 649 scsi_id = ud->device.driver_data;
650 if (!scsi_id)
651 return 0;
652
653 /* Trigger shutdown functions in scsi's highlevel. */
654 if (scsi_id->scsi_host)
655 scsi_unblock_requests(scsi_id->scsi_host);
656 sdev = scsi_id->sdev;
657 if (sdev) {
658 scsi_id->sdev = NULL;
659 scsi_remove_device(sdev);
660 }
639 661
640 sbp2_logout_device(scsi_id); 662 sbp2_logout_device(scsi_id);
641 sbp2_remove_device(scsi_id); 663 sbp2_remove_device(scsi_id);
@@ -2473,37 +2495,26 @@ static int sbp2scsi_queuecommand(struct scsi_cmnd *SCpnt,
2473 struct scsi_id_instance_data *scsi_id = 2495 struct scsi_id_instance_data *scsi_id =
2474 (struct scsi_id_instance_data *)SCpnt->device->host->hostdata[0]; 2496 (struct scsi_id_instance_data *)SCpnt->device->host->hostdata[0];
2475 struct sbp2scsi_host_info *hi; 2497 struct sbp2scsi_host_info *hi;
2498 int result = DID_NO_CONNECT << 16;
2476 2499
2477 SBP2_DEBUG("sbp2scsi_queuecommand"); 2500 SBP2_DEBUG("sbp2scsi_queuecommand");
2478 2501
2479 /* 2502 if (!sbp2util_node_is_available(scsi_id))
2480 * If scsi_id is null, it means there is no device in this slot, 2503 goto done;
2481 * so we should return selection timeout.
2482 */
2483 if (!scsi_id) {
2484 SCpnt->result = DID_NO_CONNECT << 16;
2485 done (SCpnt);
2486 return 0;
2487 }
2488 2504
2489 hi = scsi_id->hi; 2505 hi = scsi_id->hi;
2490 2506
2491 if (!hi) { 2507 if (!hi) {
2492 SBP2_ERR("sbp2scsi_host_info is NULL - this is bad!"); 2508 SBP2_ERR("sbp2scsi_host_info is NULL - this is bad!");
2493 SCpnt->result = DID_NO_CONNECT << 16; 2509 goto done;
2494 done (SCpnt);
2495 return(0);
2496 } 2510 }
2497 2511
2498 /* 2512 /*
2499 * Until we handle multiple luns, just return selection time-out 2513 * Until we handle multiple luns, just return selection time-out
2500 * to any IO directed at non-zero LUNs 2514 * to any IO directed at non-zero LUNs
2501 */ 2515 */
2502 if (SCpnt->device->lun) { 2516 if (SCpnt->device->lun)
2503 SCpnt->result = DID_NO_CONNECT << 16; 2517 goto done;
2504 done (SCpnt);
2505 return(0);
2506 }
2507 2518
2508 /* 2519 /*
2509 * Check for request sense command, and handle it here 2520 * Check for request sense command, and handle it here
@@ -2514,7 +2525,7 @@ static int sbp2scsi_queuecommand(struct scsi_cmnd *SCpnt,
2514 memcpy(SCpnt->request_buffer, SCpnt->sense_buffer, SCpnt->request_bufflen); 2525 memcpy(SCpnt->request_buffer, SCpnt->sense_buffer, SCpnt->request_bufflen);
2515 memset(SCpnt->sense_buffer, 0, sizeof(SCpnt->sense_buffer)); 2526 memset(SCpnt->sense_buffer, 0, sizeof(SCpnt->sense_buffer));
2516 sbp2scsi_complete_command(scsi_id, SBP2_SCSI_STATUS_GOOD, SCpnt, done); 2527 sbp2scsi_complete_command(scsi_id, SBP2_SCSI_STATUS_GOOD, SCpnt, done);
2517 return(0); 2528 return 0;
2518 } 2529 }
2519 2530
2520 /* 2531 /*
@@ -2522,9 +2533,8 @@ static int sbp2scsi_queuecommand(struct scsi_cmnd *SCpnt,
2522 */ 2533 */
2523 if (!hpsb_node_entry_valid(scsi_id->ne)) { 2534 if (!hpsb_node_entry_valid(scsi_id->ne)) {
2524 SBP2_ERR("Bus reset in progress - rejecting command"); 2535 SBP2_ERR("Bus reset in progress - rejecting command");
2525 SCpnt->result = DID_BUS_BUSY << 16; 2536 result = DID_BUS_BUSY << 16;
2526 done (SCpnt); 2537 goto done;
2527 return(0);
2528 } 2538 }
2529 2539
2530 /* 2540 /*
@@ -2535,8 +2545,12 @@ static int sbp2scsi_queuecommand(struct scsi_cmnd *SCpnt,
2535 sbp2scsi_complete_command(scsi_id, SBP2_SCSI_STATUS_SELECTION_TIMEOUT, 2545 sbp2scsi_complete_command(scsi_id, SBP2_SCSI_STATUS_SELECTION_TIMEOUT,
2536 SCpnt, done); 2546 SCpnt, done);
2537 } 2547 }
2548 return 0;
2538 2549
2539 return(0); 2550done:
2551 SCpnt->result = result;
2552 done(SCpnt);
2553 return 0;
2540} 2554}
2541 2555
2542/* 2556/*
@@ -2683,14 +2697,27 @@ static void sbp2scsi_complete_command(struct scsi_id_instance_data *scsi_id,
2683} 2697}
2684 2698
2685 2699
2686static int sbp2scsi_slave_configure (struct scsi_device *sdev) 2700static int sbp2scsi_slave_alloc(struct scsi_device *sdev)
2687{ 2701{
2688 blk_queue_dma_alignment(sdev->request_queue, (512 - 1)); 2702 ((struct scsi_id_instance_data *)sdev->host->hostdata[0])->sdev = sdev;
2703 return 0;
2704}
2705
2689 2706
2707static int sbp2scsi_slave_configure(struct scsi_device *sdev)
2708{
2709 blk_queue_dma_alignment(sdev->request_queue, (512 - 1));
2690 return 0; 2710 return 0;
2691} 2711}
2692 2712
2693 2713
2714static void sbp2scsi_slave_destroy(struct scsi_device *sdev)
2715{
2716 ((struct scsi_id_instance_data *)sdev->host->hostdata[0])->sdev = NULL;
2717 return;
2718}
2719
2720
2694/* 2721/*
2695 * Called by scsi stack when something has really gone wrong. Usually 2722 * Called by scsi stack when something has really gone wrong. Usually
2696 * called when a command has timed-out for some reason. 2723 * called when a command has timed-out for some reason.
@@ -2705,7 +2732,7 @@ static int sbp2scsi_abort(struct scsi_cmnd *SCpnt)
2705 SBP2_ERR("aborting sbp2 command"); 2732 SBP2_ERR("aborting sbp2 command");
2706 scsi_print_command(SCpnt); 2733 scsi_print_command(SCpnt);
2707 2734
2708 if (scsi_id) { 2735 if (sbp2util_node_is_available(scsi_id)) {
2709 2736
2710 /* 2737 /*
2711 * Right now, just return any matching command structures 2738 * Right now, just return any matching command structures
@@ -2742,31 +2769,24 @@ static int sbp2scsi_abort(struct scsi_cmnd *SCpnt)
2742/* 2769/*
2743 * Called by scsi stack when something has really gone wrong. 2770 * Called by scsi stack when something has really gone wrong.
2744 */ 2771 */
2745static int __sbp2scsi_reset(struct scsi_cmnd *SCpnt) 2772static int sbp2scsi_reset(struct scsi_cmnd *SCpnt)
2746{ 2773{
2747 struct scsi_id_instance_data *scsi_id = 2774 struct scsi_id_instance_data *scsi_id =
2748 (struct scsi_id_instance_data *)SCpnt->device->host->hostdata[0]; 2775 (struct scsi_id_instance_data *)SCpnt->device->host->hostdata[0];
2776 unsigned long flags;
2749 2777
2750 SBP2_ERR("reset requested"); 2778 SBP2_ERR("reset requested");
2751 2779
2752 if (scsi_id) { 2780 spin_lock_irqsave(SCpnt->device->host->host_lock, flags);
2781
2782 if (sbp2util_node_is_available(scsi_id)) {
2753 SBP2_ERR("Generating sbp2 fetch agent reset"); 2783 SBP2_ERR("Generating sbp2 fetch agent reset");
2754 sbp2_agent_reset(scsi_id, 0); 2784 sbp2_agent_reset(scsi_id, 0);
2755 } 2785 }
2756 2786
2757 return(SUCCESS);
2758}
2759
2760static int sbp2scsi_reset(struct scsi_cmnd *SCpnt)
2761{
2762 unsigned long flags;
2763 int rc;
2764
2765 spin_lock_irqsave(SCpnt->device->host->host_lock, flags);
2766 rc = __sbp2scsi_reset(SCpnt);
2767 spin_unlock_irqrestore(SCpnt->device->host->host_lock, flags); 2787 spin_unlock_irqrestore(SCpnt->device->host->host_lock, flags);
2768 2788
2769 return rc; 2789 return SUCCESS;
2770} 2790}
2771 2791
2772static const char *sbp2scsi_info (struct Scsi_Host *host) 2792static const char *sbp2scsi_info (struct Scsi_Host *host)
@@ -2817,7 +2837,9 @@ static struct scsi_host_template scsi_driver_template = {
2817 .eh_device_reset_handler = sbp2scsi_reset, 2837 .eh_device_reset_handler = sbp2scsi_reset,
2818 .eh_bus_reset_handler = sbp2scsi_reset, 2838 .eh_bus_reset_handler = sbp2scsi_reset,
2819 .eh_host_reset_handler = sbp2scsi_reset, 2839 .eh_host_reset_handler = sbp2scsi_reset,
2840 .slave_alloc = sbp2scsi_slave_alloc,
2820 .slave_configure = sbp2scsi_slave_configure, 2841 .slave_configure = sbp2scsi_slave_configure,
2842 .slave_destroy = sbp2scsi_slave_destroy,
2821 .this_id = -1, 2843 .this_id = -1,
2822 .sg_tablesize = SG_ALL, 2844 .sg_tablesize = SG_ALL,
2823 .use_clustering = ENABLE_CLUSTERING, 2845 .use_clustering = ENABLE_CLUSTERING,
@@ -2837,7 +2859,8 @@ static int sbp2_module_init(void)
2837 2859
2838 /* Module load debug option to force one command at a time (serializing I/O) */ 2860 /* Module load debug option to force one command at a time (serializing I/O) */
2839 if (serialize_io) { 2861 if (serialize_io) {
2840 SBP2_ERR("Driver forced to serialize I/O (serialize_io = 1)"); 2862 SBP2_INFO("Driver forced to serialize I/O (serialize_io=1)");
2863 SBP2_INFO("Try serialize_io=0 for better performance");
2841 scsi_driver_template.can_queue = 1; 2864 scsi_driver_template.can_queue = 1;
2842 scsi_driver_template.cmd_per_lun = 1; 2865 scsi_driver_template.cmd_per_lun = 1;
2843 } 2866 }
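
The sbp2scsi_queuecommand() hunks collapse several "set result, call done(), return 0" copies into a single exit path: result defaults to DID_NO_CONNECT and every early failure jumps to one done: label. A small standalone illustration of that idiom, with invented error codes rather than the SCSI midlayer's:

#include <stdio.h>

enum { OK = 0, ERR_NO_CONNECT = 1, ERR_BUSY = 2 };

struct cmd { int result; };

static void complete(struct cmd *c)
{
        printf("completed, result=%d\n", c->result);
}

static int queue_cmd(struct cmd *c, int dev_present, int bus_resetting)
{
        int result = ERR_NO_CONNECT;    /* default for every early exit */

        if (!dev_present)
                goto done;              /* no device in this slot */

        if (bus_resetting) {
                result = ERR_BUSY;      /* caller should retry later */
                goto done;
        }

        /* normal submission path would go here */
        return OK;

done:
        c->result = result;
        complete(c);
        return OK;
}

int main(void)
{
        struct cmd c;

        queue_cmd(&c, 0, 0);    /* completed, result=1 */
        queue_cmd(&c, 1, 1);    /* completed, result=2 */
        return 0;
}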
diff --git a/drivers/ieee1394/video1394.c b/drivers/ieee1394/video1394.c
index 9d6facf2f78f..11be9c9c82a8 100644
--- a/drivers/ieee1394/video1394.c
+++ b/drivers/ieee1394/video1394.c
@@ -1571,4 +1571,3 @@ static int __init video1394_init_module (void)
1571 1571
1572module_init(video1394_init_module); 1572module_init(video1394_init_module);
1573module_exit(video1394_exit_module); 1573module_exit(video1394_exit_module);
1574MODULE_ALIAS_CHARDEV(IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_VIDEO1394 * 16);
diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h
index b1897bed14ad..cc124344dd2c 100644
--- a/drivers/infiniband/core/uverbs.h
+++ b/drivers/infiniband/core/uverbs.h
@@ -69,6 +69,7 @@ struct ib_uverbs_event_file {
69 69
70struct ib_uverbs_file { 70struct ib_uverbs_file {
71 struct kref ref; 71 struct kref ref;
72 struct semaphore mutex;
72 struct ib_uverbs_device *device; 73 struct ib_uverbs_device *device;
73 struct ib_ucontext *ucontext; 74 struct ib_ucontext *ucontext;
74 struct ib_event_handler event_handler; 75 struct ib_event_handler event_handler;
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index e91ebde46481..562445165d2b 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -76,8 +76,9 @@ ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
76 struct ib_uverbs_get_context_resp resp; 76 struct ib_uverbs_get_context_resp resp;
77 struct ib_udata udata; 77 struct ib_udata udata;
78 struct ib_device *ibdev = file->device->ib_dev; 78 struct ib_device *ibdev = file->device->ib_dev;
79 struct ib_ucontext *ucontext;
79 int i; 80 int i;
80 int ret = in_len; 81 int ret;
81 82
82 if (out_len < sizeof resp) 83 if (out_len < sizeof resp)
83 return -ENOSPC; 84 return -ENOSPC;
@@ -85,45 +86,56 @@ ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
85 if (copy_from_user(&cmd, buf, sizeof cmd)) 86 if (copy_from_user(&cmd, buf, sizeof cmd))
86 return -EFAULT; 87 return -EFAULT;
87 88
89 down(&file->mutex);
90
91 if (file->ucontext) {
92 ret = -EINVAL;
93 goto err;
94 }
95
88 INIT_UDATA(&udata, buf + sizeof cmd, 96 INIT_UDATA(&udata, buf + sizeof cmd,
89 (unsigned long) cmd.response + sizeof resp, 97 (unsigned long) cmd.response + sizeof resp,
90 in_len - sizeof cmd, out_len - sizeof resp); 98 in_len - sizeof cmd, out_len - sizeof resp);
91 99
92 file->ucontext = ibdev->alloc_ucontext(ibdev, &udata); 100 ucontext = ibdev->alloc_ucontext(ibdev, &udata);
93 if (IS_ERR(file->ucontext)) { 101 if (IS_ERR(ucontext))
94 ret = PTR_ERR(file->ucontext); 102 return PTR_ERR(file->ucontext);
95 file->ucontext = NULL;
96 return ret;
97 }
98 103
99 file->ucontext->device = ibdev; 104 ucontext->device = ibdev;
100 INIT_LIST_HEAD(&file->ucontext->pd_list); 105 INIT_LIST_HEAD(&ucontext->pd_list);
101 INIT_LIST_HEAD(&file->ucontext->mr_list); 106 INIT_LIST_HEAD(&ucontext->mr_list);
102 INIT_LIST_HEAD(&file->ucontext->mw_list); 107 INIT_LIST_HEAD(&ucontext->mw_list);
103 INIT_LIST_HEAD(&file->ucontext->cq_list); 108 INIT_LIST_HEAD(&ucontext->cq_list);
104 INIT_LIST_HEAD(&file->ucontext->qp_list); 109 INIT_LIST_HEAD(&ucontext->qp_list);
105 INIT_LIST_HEAD(&file->ucontext->srq_list); 110 INIT_LIST_HEAD(&ucontext->srq_list);
106 INIT_LIST_HEAD(&file->ucontext->ah_list); 111 INIT_LIST_HEAD(&ucontext->ah_list);
107 spin_lock_init(&file->ucontext->lock);
108 112
109 resp.async_fd = file->async_file.fd; 113 resp.async_fd = file->async_file.fd;
110 for (i = 0; i < file->device->num_comp; ++i) 114 for (i = 0; i < file->device->num_comp; ++i)
111 if (copy_to_user((void __user *) (unsigned long) cmd.cq_fd_tab + 115 if (copy_to_user((void __user *) (unsigned long) cmd.cq_fd_tab +
112 i * sizeof (__u32), 116 i * sizeof (__u32),
113 &file->comp_file[i].fd, sizeof (__u32))) 117 &file->comp_file[i].fd, sizeof (__u32))) {
114 goto err; 118 ret = -EFAULT;
119 goto err_free;
120 }
115 121
116 if (copy_to_user((void __user *) (unsigned long) cmd.response, 122 if (copy_to_user((void __user *) (unsigned long) cmd.response,
117 &resp, sizeof resp)) 123 &resp, sizeof resp)) {
118 goto err; 124 ret = -EFAULT;
125 goto err_free;
126 }
127
128 file->ucontext = ucontext;
129 up(&file->mutex);
119 130
120 return in_len; 131 return in_len;
121 132
122err: 133err_free:
123 ibdev->dealloc_ucontext(file->ucontext); 134 ibdev->dealloc_ucontext(ucontext);
124 file->ucontext = NULL;
125 135
126 return -EFAULT; 136err:
137 up(&file->mutex);
138 return ret;
127} 139}
128 140
129ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file, 141ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
@@ -352,9 +364,9 @@ retry:
352 if (ret) 364 if (ret)
353 goto err_pd; 365 goto err_pd;
354 366
355 spin_lock_irq(&file->ucontext->lock); 367 down(&file->mutex);
356 list_add_tail(&uobj->list, &file->ucontext->pd_list); 368 list_add_tail(&uobj->list, &file->ucontext->pd_list);
357 spin_unlock_irq(&file->ucontext->lock); 369 up(&file->mutex);
358 370
359 memset(&resp, 0, sizeof resp); 371 memset(&resp, 0, sizeof resp);
360 resp.pd_handle = uobj->id; 372 resp.pd_handle = uobj->id;
@@ -368,9 +380,9 @@ retry:
368 return in_len; 380 return in_len;
369 381
370err_list: 382err_list:
371 spin_lock_irq(&file->ucontext->lock); 383 down(&file->mutex);
372 list_del(&uobj->list); 384 list_del(&uobj->list);
373 spin_unlock_irq(&file->ucontext->lock); 385 up(&file->mutex);
374 386
375 down(&ib_uverbs_idr_mutex); 387 down(&ib_uverbs_idr_mutex);
376 idr_remove(&ib_uverbs_pd_idr, uobj->id); 388 idr_remove(&ib_uverbs_pd_idr, uobj->id);
@@ -410,9 +422,9 @@ ssize_t ib_uverbs_dealloc_pd(struct ib_uverbs_file *file,
410 422
411 idr_remove(&ib_uverbs_pd_idr, cmd.pd_handle); 423 idr_remove(&ib_uverbs_pd_idr, cmd.pd_handle);
412 424
413 spin_lock_irq(&file->ucontext->lock); 425 down(&file->mutex);
414 list_del(&uobj->list); 426 list_del(&uobj->list);
415 spin_unlock_irq(&file->ucontext->lock); 427 up(&file->mutex);
416 428
417 kfree(uobj); 429 kfree(uobj);
418 430
@@ -512,9 +524,9 @@ retry:
512 524
513 resp.mr_handle = obj->uobject.id; 525 resp.mr_handle = obj->uobject.id;
514 526
515 spin_lock_irq(&file->ucontext->lock); 527 down(&file->mutex);
516 list_add_tail(&obj->uobject.list, &file->ucontext->mr_list); 528 list_add_tail(&obj->uobject.list, &file->ucontext->mr_list);
517 spin_unlock_irq(&file->ucontext->lock); 529 up(&file->mutex);
518 530
519 if (copy_to_user((void __user *) (unsigned long) cmd.response, 531 if (copy_to_user((void __user *) (unsigned long) cmd.response,
520 &resp, sizeof resp)) { 532 &resp, sizeof resp)) {
@@ -527,9 +539,9 @@ retry:
527 return in_len; 539 return in_len;
528 540
529err_list: 541err_list:
530 spin_lock_irq(&file->ucontext->lock); 542 down(&file->mutex);
531 list_del(&obj->uobject.list); 543 list_del(&obj->uobject.list);
532 spin_unlock_irq(&file->ucontext->lock); 544 up(&file->mutex);
533 545
534err_unreg: 546err_unreg:
535 ib_dereg_mr(mr); 547 ib_dereg_mr(mr);
@@ -570,9 +582,9 @@ ssize_t ib_uverbs_dereg_mr(struct ib_uverbs_file *file,
570 582
571 idr_remove(&ib_uverbs_mr_idr, cmd.mr_handle); 583 idr_remove(&ib_uverbs_mr_idr, cmd.mr_handle);
572 584
573 spin_lock_irq(&file->ucontext->lock); 585 down(&file->mutex);
574 list_del(&memobj->uobject.list); 586 list_del(&memobj->uobject.list);
575 spin_unlock_irq(&file->ucontext->lock); 587 up(&file->mutex);
576 588
577 ib_umem_release(file->device->ib_dev, &memobj->umem); 589 ib_umem_release(file->device->ib_dev, &memobj->umem);
578 kfree(memobj); 590 kfree(memobj);
@@ -647,9 +659,9 @@ retry:
647 if (ret) 659 if (ret)
648 goto err_cq; 660 goto err_cq;
649 661
650 spin_lock_irq(&file->ucontext->lock); 662 down(&file->mutex);
651 list_add_tail(&uobj->uobject.list, &file->ucontext->cq_list); 663 list_add_tail(&uobj->uobject.list, &file->ucontext->cq_list);
652 spin_unlock_irq(&file->ucontext->lock); 664 up(&file->mutex);
653 665
654 memset(&resp, 0, sizeof resp); 666 memset(&resp, 0, sizeof resp);
655 resp.cq_handle = uobj->uobject.id; 667 resp.cq_handle = uobj->uobject.id;
@@ -664,9 +676,9 @@ retry:
664 return in_len; 676 return in_len;
665 677
666err_list: 678err_list:
667 spin_lock_irq(&file->ucontext->lock); 679 down(&file->mutex);
668 list_del(&uobj->uobject.list); 680 list_del(&uobj->uobject.list);
669 spin_unlock_irq(&file->ucontext->lock); 681 up(&file->mutex);
670 682
671 down(&ib_uverbs_idr_mutex); 683 down(&ib_uverbs_idr_mutex);
672 idr_remove(&ib_uverbs_cq_idr, uobj->uobject.id); 684 idr_remove(&ib_uverbs_cq_idr, uobj->uobject.id);
@@ -712,9 +724,9 @@ ssize_t ib_uverbs_destroy_cq(struct ib_uverbs_file *file,
712 724
713 idr_remove(&ib_uverbs_cq_idr, cmd.cq_handle); 725 idr_remove(&ib_uverbs_cq_idr, cmd.cq_handle);
714 726
715 spin_lock_irq(&file->ucontext->lock); 727 down(&file->mutex);
716 list_del(&uobj->uobject.list); 728 list_del(&uobj->uobject.list);
717 spin_unlock_irq(&file->ucontext->lock); 729 up(&file->mutex);
718 730
719 spin_lock_irq(&file->comp_file[0].lock); 731 spin_lock_irq(&file->comp_file[0].lock);
720 list_for_each_entry_safe(evt, tmp, &uobj->comp_list, obj_list) { 732 list_for_each_entry_safe(evt, tmp, &uobj->comp_list, obj_list) {
@@ -847,9 +859,9 @@ retry:
847 859
848 resp.qp_handle = uobj->uobject.id; 860 resp.qp_handle = uobj->uobject.id;
849 861
850 spin_lock_irq(&file->ucontext->lock); 862 down(&file->mutex);
851 list_add_tail(&uobj->uobject.list, &file->ucontext->qp_list); 863 list_add_tail(&uobj->uobject.list, &file->ucontext->qp_list);
852 spin_unlock_irq(&file->ucontext->lock); 864 up(&file->mutex);
853 865
854 if (copy_to_user((void __user *) (unsigned long) cmd.response, 866 if (copy_to_user((void __user *) (unsigned long) cmd.response,
855 &resp, sizeof resp)) { 867 &resp, sizeof resp)) {
@@ -862,9 +874,9 @@ retry:
862 return in_len; 874 return in_len;
863 875
864err_list: 876err_list:
865 spin_lock_irq(&file->ucontext->lock); 877 down(&file->mutex);
866 list_del(&uobj->uobject.list); 878 list_del(&uobj->uobject.list);
867 spin_unlock_irq(&file->ucontext->lock); 879 up(&file->mutex);
868 880
869err_destroy: 881err_destroy:
870 ib_destroy_qp(qp); 882 ib_destroy_qp(qp);
@@ -989,9 +1001,9 @@ ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file,
989 1001
990 idr_remove(&ib_uverbs_qp_idr, cmd.qp_handle); 1002 idr_remove(&ib_uverbs_qp_idr, cmd.qp_handle);
991 1003
992 spin_lock_irq(&file->ucontext->lock); 1004 down(&file->mutex);
993 list_del(&uobj->uobject.list); 1005 list_del(&uobj->uobject.list);
994 spin_unlock_irq(&file->ucontext->lock); 1006 up(&file->mutex);
995 1007
996 spin_lock_irq(&file->async_file.lock); 1008 spin_lock_irq(&file->async_file.lock);
997 list_for_each_entry_safe(evt, tmp, &uobj->event_list, obj_list) { 1009 list_for_each_entry_safe(evt, tmp, &uobj->event_list, obj_list) {
@@ -1136,9 +1148,9 @@ retry:
1136 1148
1137 resp.srq_handle = uobj->uobject.id; 1149 resp.srq_handle = uobj->uobject.id;
1138 1150
1139 spin_lock_irq(&file->ucontext->lock); 1151 down(&file->mutex);
1140 list_add_tail(&uobj->uobject.list, &file->ucontext->srq_list); 1152 list_add_tail(&uobj->uobject.list, &file->ucontext->srq_list);
1141 spin_unlock_irq(&file->ucontext->lock); 1153 up(&file->mutex);
1142 1154
1143 if (copy_to_user((void __user *) (unsigned long) cmd.response, 1155 if (copy_to_user((void __user *) (unsigned long) cmd.response,
1144 &resp, sizeof resp)) { 1156 &resp, sizeof resp)) {
@@ -1151,9 +1163,9 @@ retry:
1151 return in_len; 1163 return in_len;
1152 1164
1153err_list: 1165err_list:
1154 spin_lock_irq(&file->ucontext->lock); 1166 down(&file->mutex);
1155 list_del(&uobj->uobject.list); 1167 list_del(&uobj->uobject.list);
1156 spin_unlock_irq(&file->ucontext->lock); 1168 up(&file->mutex);
1157 1169
1158err_destroy: 1170err_destroy:
1159 ib_destroy_srq(srq); 1171 ib_destroy_srq(srq);
@@ -1227,9 +1239,9 @@ ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
1227 1239
1228 idr_remove(&ib_uverbs_srq_idr, cmd.srq_handle); 1240 idr_remove(&ib_uverbs_srq_idr, cmd.srq_handle);
1229 1241
1230 spin_lock_irq(&file->ucontext->lock); 1242 down(&file->mutex);
1231 list_del(&uobj->uobject.list); 1243 list_del(&uobj->uobject.list);
1232 spin_unlock_irq(&file->ucontext->lock); 1244 up(&file->mutex);
1233 1245
1234 spin_lock_irq(&file->async_file.lock); 1246 spin_lock_irq(&file->async_file.lock);
1235 list_for_each_entry_safe(evt, tmp, &uobj->event_list, obj_list) { 1247 list_for_each_entry_safe(evt, tmp, &uobj->event_list, obj_list) {
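
The uverbs_cmd.c changes build the new ucontext into a local variable, take the new per-file mutex, and only publish it in file->ucontext once every copy_to_user() has succeeded; the old per-context spinlock is replaced by that same mutex for the object lists. A userspace sketch of the "create privately, publish under a lock, fail without side effects" pattern, with pthreads standing in for the kernel semaphore:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct context { int id; };

struct file_state {
        pthread_mutex_t mutex;
        struct context *ucontext;       /* NULL until published */
};

static int get_context(struct file_state *f)
{
        struct context *ctx;
        int ret = 0;

        pthread_mutex_lock(&f->mutex);
        if (f->ucontext) {              /* already created: reject */
                ret = -1;
                goto out;
        }

        ctx = malloc(sizeof(*ctx));     /* build the object privately */
        if (!ctx) {
                ret = -1;
                goto out;
        }
        ctx->id = 42;

        f->ucontext = ctx;              /* only now is it visible to others */
out:
        pthread_mutex_unlock(&f->mutex);
        return ret;
}

int main(void)
{
        struct file_state f = { PTHREAD_MUTEX_INITIALIZER, NULL };

        printf("first:  %d\n", get_context(&f));    /* 0  */
        printf("second: %d\n", get_context(&f));    /* -1 */
        free(f.ucontext);
        return 0;
}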
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index ce5bdb7af306..12511808de21 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -448,7 +448,9 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
448 if (hdr.in_words * 4 != count) 448 if (hdr.in_words * 4 != count)
449 return -EINVAL; 449 return -EINVAL;
450 450
451 if (hdr.command < 0 || hdr.command >= ARRAY_SIZE(uverbs_cmd_table)) 451 if (hdr.command < 0 ||
452 hdr.command >= ARRAY_SIZE(uverbs_cmd_table) ||
453 !uverbs_cmd_table[hdr.command])
452 return -EINVAL; 454 return -EINVAL;
453 455
454 if (!file->ucontext && 456 if (!file->ucontext &&
@@ -484,27 +486,29 @@ static int ib_uverbs_open(struct inode *inode, struct file *filp)
484 file = kmalloc(sizeof *file + 486 file = kmalloc(sizeof *file +
485 (dev->num_comp - 1) * sizeof (struct ib_uverbs_event_file), 487 (dev->num_comp - 1) * sizeof (struct ib_uverbs_event_file),
486 GFP_KERNEL); 488 GFP_KERNEL);
487 if (!file) 489 if (!file) {
488 return -ENOMEM; 490 ret = -ENOMEM;
491 goto err;
492 }
489 493
490 file->device = dev; 494 file->device = dev;
491 kref_init(&file->ref); 495 kref_init(&file->ref);
496 init_MUTEX(&file->mutex);
492 497
493 file->ucontext = NULL; 498 file->ucontext = NULL;
494 499
500 kref_get(&file->ref);
495 ret = ib_uverbs_event_init(&file->async_file, file); 501 ret = ib_uverbs_event_init(&file->async_file, file);
496 if (ret) 502 if (ret)
497 goto err; 503 goto err_kref;
498 504
499 file->async_file.is_async = 1; 505 file->async_file.is_async = 1;
500 506
501 kref_get(&file->ref);
502
503 for (i = 0; i < dev->num_comp; ++i) { 507 for (i = 0; i < dev->num_comp; ++i) {
508 kref_get(&file->ref);
504 ret = ib_uverbs_event_init(&file->comp_file[i], file); 509 ret = ib_uverbs_event_init(&file->comp_file[i], file);
505 if (ret) 510 if (ret)
506 goto err_async; 511 goto err_async;
507 kref_get(&file->ref);
508 file->comp_file[i].is_async = 0; 512 file->comp_file[i].is_async = 0;
509 } 513 }
510 514
@@ -524,9 +528,16 @@ err_async:
524 528
525 ib_uverbs_event_release(&file->async_file); 529 ib_uverbs_event_release(&file->async_file);
526 530
527err: 531err_kref:
532 /*
533 * One extra kref_put() because we took a reference before the
534 * event file creation that failed and got us here.
535 */
536 kref_put(&file->ref, ib_uverbs_release_file);
528 kref_put(&file->ref, ib_uverbs_release_file); 537 kref_put(&file->ref, ib_uverbs_release_file);
529 538
539err:
540 module_put(dev->ib_dev->owner);
530 return ret; 541 return ret;
531} 542}
532 543
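
The uverbs_main.c hunk moves each kref_get() in front of the call that can fail and makes the error path drop exactly the references that were taken, so the count matches the number of event files actually set up. The underlying discipline is "take the reference before the call that hands it out, drop it once on every failure path"; a minimal standalone counter following the same rule (plain C, not the kernel's struct kref):

#include <stdio.h>
#include <stdlib.h>

struct obj {
        int refcount;
};

static void obj_release(struct obj *o)
{
        printf("releasing object\n");
        free(o);
}

static void obj_get(struct obj *o) { o->refcount++; }

static void obj_put(struct obj *o)
{
        if (--o->refcount == 0)
                obj_release(o);
}

/* Stand-in for ib_uverbs_event_init(): pretend it fails. */
static int event_init(struct obj *o) { (void)o; return -1; }

int main(void)
{
        struct obj *o = calloc(1, sizeof(*o));

        o->refcount = 1;                /* reference owned by the opener */

        obj_get(o);                     /* reference for the event file */
        if (event_init(o)) {
                obj_put(o);             /* undo the reference just taken */
                obj_put(o);             /* and drop the opener's reference */
                return 1;
        }
        /* success path would keep both references until release time */
        obj_put(o);
        return 0;
}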
diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
index cc758a2d2bc6..f6a8ac026557 100644
--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
+++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
@@ -605,7 +605,7 @@ static int mthca_map_cmd(struct mthca_dev *dev, u16 op, struct mthca_icm *icm,
605 err = -EINVAL; 605 err = -EINVAL;
606 goto out; 606 goto out;
607 } 607 }
608 for (i = 0; i < mthca_icm_size(&iter) / (1 << lg); ++i, ++nent) { 608 for (i = 0; i < mthca_icm_size(&iter) / (1 << lg); ++i) {
609 if (virt != -1) { 609 if (virt != -1) {
610 pages[nent * 2] = cpu_to_be64(virt); 610 pages[nent * 2] = cpu_to_be64(virt);
611 virt += 1 << lg; 611 virt += 1 << lg;
@@ -616,7 +616,7 @@ static int mthca_map_cmd(struct mthca_dev *dev, u16 op, struct mthca_icm *icm,
616 ts += 1 << (lg - 10); 616 ts += 1 << (lg - 10);
617 ++tc; 617 ++tc;
618 618
619 if (nent == MTHCA_MAILBOX_SIZE / 16) { 619 if (++nent == MTHCA_MAILBOX_SIZE / 16) {
620 err = mthca_cmd(dev, mailbox->dma, nent, 0, op, 620 err = mthca_cmd(dev, mailbox->dma, nent, 0, op,
621 CMD_TIME_CLASS_B, status); 621 CMD_TIME_CLASS_B, status);
622 if (err || *status) 622 if (err || *status)
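
The mthca_map_cmd() hunk moves the nent increment out of the for() header and into the flush test, so the count includes the entry that was just written and the mailbox is flushed exactly when it fills instead of one entry off. The shape of the fix, reduced to a plain batching loop with an invented batch size:

#include <stdio.h>

#define BATCH 4         /* invented, not MTHCA_MAILBOX_SIZE / 16 */

static void flush(const int *buf, int n)
{
        printf("flushing %d entries (first = %d)\n", n, buf[0]);
}

int main(void)
{
        int buf[BATCH];
        int nent = 0;
        int i;

        for (i = 0; i < 10; i++) {
                buf[nent] = i;          /* write the entry first ...        */
                if (++nent == BATCH) {  /* ... then count it, flush when full */
                        flush(buf, nent);
                        nent = 0;
                }
        }
        if (nent)
                flush(buf, nent);       /* flush the partial tail */
        return 0;
}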
diff --git a/drivers/infiniband/hw/mthca/mthca_eq.c b/drivers/infiniband/hw/mthca/mthca_eq.c
index 78152a8ad17d..c81fa8e975ef 100644
--- a/drivers/infiniband/hw/mthca/mthca_eq.c
+++ b/drivers/infiniband/hw/mthca/mthca_eq.c
@@ -836,7 +836,7 @@ int __devinit mthca_init_eq_table(struct mthca_dev *dev)
836 dev->eq_table.clr_mask = 836 dev->eq_table.clr_mask =
837 swab32(1 << (dev->eq_table.inta_pin & 31)); 837 swab32(1 << (dev->eq_table.inta_pin & 31));
838 dev->eq_table.clr_int = dev->clr_base + 838 dev->eq_table.clr_int = dev->clr_base +
839 (dev->eq_table.inta_pin < 31 ? 4 : 0); 839 (dev->eq_table.inta_pin < 32 ? 4 : 0);
840 } 840 }
841 841
842 dev->eq_table.arm_mask = 0; 842 dev->eq_table.arm_mask = 0;
diff --git a/drivers/infiniband/hw/mthca/mthca_memfree.c b/drivers/infiniband/hw/mthca/mthca_memfree.c
index 1827400f189b..7bd7a4bec7b4 100644
--- a/drivers/infiniband/hw/mthca/mthca_memfree.c
+++ b/drivers/infiniband/hw/mthca/mthca_memfree.c
@@ -290,7 +290,7 @@ struct mthca_icm_table *mthca_alloc_icm_table(struct mthca_dev *dev,
290 int i; 290 int i;
291 u8 status; 291 u8 status;
292 292
293 num_icm = obj_size * nobj / MTHCA_TABLE_CHUNK_SIZE; 293 num_icm = (obj_size * nobj + MTHCA_TABLE_CHUNK_SIZE - 1) / MTHCA_TABLE_CHUNK_SIZE;
294 294
295 table = kmalloc(sizeof *table + num_icm * sizeof *table->icm, GFP_KERNEL); 295 table = kmalloc(sizeof *table + num_icm * sizeof *table->icm, GFP_KERNEL);
296 if (!table) 296 if (!table)
@@ -529,12 +529,25 @@ int mthca_alloc_db(struct mthca_dev *dev, int type, u32 qn, __be32 **db)
529 goto found; 529 goto found;
530 } 530 }
531 531
532 for (i = start; i != end; i += dir)
533 if (!dev->db_tab->page[i].db_rec) {
534 page = dev->db_tab->page + i;
535 goto alloc;
536 }
537
532 if (dev->db_tab->max_group1 >= dev->db_tab->min_group2 - 1) { 538 if (dev->db_tab->max_group1 >= dev->db_tab->min_group2 - 1) {
533 ret = -ENOMEM; 539 ret = -ENOMEM;
534 goto out; 540 goto out;
535 } 541 }
536 542
543 if (group == 0)
544 ++dev->db_tab->max_group1;
545 else
546 --dev->db_tab->min_group2;
547
537 page = dev->db_tab->page + end; 548 page = dev->db_tab->page + end;
549
550alloc:
538 page->db_rec = dma_alloc_coherent(&dev->pdev->dev, 4096, 551 page->db_rec = dma_alloc_coherent(&dev->pdev->dev, 4096,
539 &page->mapping, GFP_KERNEL); 552 &page->mapping, GFP_KERNEL);
540 if (!page->db_rec) { 553 if (!page->db_rec) {
@@ -554,10 +567,6 @@ int mthca_alloc_db(struct mthca_dev *dev, int type, u32 qn, __be32 **db)
554 } 567 }
555 568
556 bitmap_zero(page->used, MTHCA_DB_REC_PER_PAGE); 569 bitmap_zero(page->used, MTHCA_DB_REC_PER_PAGE);
557 if (group == 0)
558 ++dev->db_tab->max_group1;
559 else
560 --dev->db_tab->min_group2;
561 570
562found: 571found:
563 j = find_first_zero_bit(page->used, MTHCA_DB_REC_PER_PAGE); 572 j = find_first_zero_bit(page->used, MTHCA_DB_REC_PER_PAGE);
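
The first mthca_memfree.c hunk rounds the ICM chunk count up instead of truncating it, so a table whose size is not a multiple of MTHCA_TABLE_CHUNK_SIZE still gets a chunk for the tail. It is the usual integer ceiling-division idiom; a two-line demonstration with arbitrary sizes rather than the real constants (newer kernels spell the same thing DIV_ROUND_UP()):

#include <stdio.h>

/* Same expression the hunk open-codes. */
#define CEIL_DIV(n, d)  (((n) + (d) - 1) / (d))

int main(void)
{
        unsigned long chunk = 256 * 1024;       /* arbitrary chunk size */
        unsigned long size  = 1000 * 1000;      /* arbitrary table size */

        printf("truncating: %lu chunks\n", size / chunk);          /* 3 */
        printf("rounded up: %lu chunks\n", CEIL_DIV(size, chunk)); /* 4 */
        return 0;
}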
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index 1c1c2e230871..3f5319a46577 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -84,7 +84,7 @@ static int mthca_query_device(struct ib_device *ibdev,
84 props->vendor_id = be32_to_cpup((__be32 *) (out_mad->data + 36)) & 84 props->vendor_id = be32_to_cpup((__be32 *) (out_mad->data + 36)) &
85 0xffffff; 85 0xffffff;
86 props->vendor_part_id = be16_to_cpup((__be16 *) (out_mad->data + 30)); 86 props->vendor_part_id = be16_to_cpup((__be16 *) (out_mad->data + 30));
87 props->hw_ver = be16_to_cpup((__be16 *) (out_mad->data + 32)); 87 props->hw_ver = be32_to_cpup((__be32 *) (out_mad->data + 32));
88 memcpy(&props->sys_image_guid, out_mad->data + 4, 8); 88 memcpy(&props->sys_image_guid, out_mad->data + 4, 8);
89 memcpy(&props->node_guid, out_mad->data + 12, 8); 89 memcpy(&props->node_guid, out_mad->data + 12, 8);
90 90
diff --git a/drivers/input/input.c b/drivers/input/input.c
index 88636a204525..14ae5583e198 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -308,6 +308,7 @@ static struct input_device_id *input_match_device(struct input_device_id *id, st
308 MATCH_BIT(ledbit, LED_MAX); 308 MATCH_BIT(ledbit, LED_MAX);
309 MATCH_BIT(sndbit, SND_MAX); 309 MATCH_BIT(sndbit, SND_MAX);
310 MATCH_BIT(ffbit, FF_MAX); 310 MATCH_BIT(ffbit, FF_MAX);
311 MATCH_BIT(swbit, SW_MAX);
311 312
312 return id; 313 return id;
313 } 314 }
diff --git a/drivers/isdn/divert/divert_procfs.c b/drivers/isdn/divert/divert_procfs.c
index e1f0d87de0eb..0b0ea26023e5 100644
--- a/drivers/isdn/divert/divert_procfs.c
+++ b/drivers/isdn/divert/divert_procfs.c
@@ -287,12 +287,12 @@ divert_dev_init(void)
287 init_waitqueue_head(&rd_queue); 287 init_waitqueue_head(&rd_queue);
288 288
289#ifdef CONFIG_PROC_FS 289#ifdef CONFIG_PROC_FS
290 isdn_proc_entry = create_proc_entry("isdn", S_IFDIR | S_IRUGO | S_IXUGO, proc_net); 290 isdn_proc_entry = proc_mkdir("net/isdn", NULL);
291 if (!isdn_proc_entry) 291 if (!isdn_proc_entry)
292 return (-1); 292 return (-1);
293 isdn_divert_entry = create_proc_entry("divert", S_IFREG | S_IRUGO, isdn_proc_entry); 293 isdn_divert_entry = create_proc_entry("divert", S_IFREG | S_IRUGO, isdn_proc_entry);
294 if (!isdn_divert_entry) { 294 if (!isdn_divert_entry) {
295 remove_proc_entry("isdn", proc_net); 295 remove_proc_entry("net/isdn", NULL);
296 return (-1); 296 return (-1);
297 } 297 }
298 isdn_divert_entry->proc_fops = &isdn_fops; 298 isdn_divert_entry->proc_fops = &isdn_fops;
@@ -312,7 +312,7 @@ divert_dev_deinit(void)
312 312
313#ifdef CONFIG_PROC_FS 313#ifdef CONFIG_PROC_FS
314 remove_proc_entry("divert", isdn_proc_entry); 314 remove_proc_entry("divert", isdn_proc_entry);
315 remove_proc_entry("isdn", proc_net); 315 remove_proc_entry("net/isdn", NULL);
316#endif /* CONFIG_PROC_FS */ 316#endif /* CONFIG_PROC_FS */
317 317
318 return (0); 318 return (0);
diff --git a/drivers/isdn/hardware/eicon/diva_didd.c b/drivers/isdn/hardware/eicon/diva_didd.c
index 7fdf8ae5be52..27204f4b111a 100644
--- a/drivers/isdn/hardware/eicon/diva_didd.c
+++ b/drivers/isdn/hardware/eicon/diva_didd.c
@@ -30,8 +30,6 @@ static char *DRIVERNAME =
30static char *DRIVERLNAME = "divadidd"; 30static char *DRIVERLNAME = "divadidd";
31char *DRIVERRELEASE_DIDD = "2.0"; 31char *DRIVERRELEASE_DIDD = "2.0";
32 32
33static char *main_proc_dir = "eicon";
34
35MODULE_DESCRIPTION("DIDD table driver for diva drivers"); 33MODULE_DESCRIPTION("DIDD table driver for diva drivers");
36MODULE_AUTHOR("Cytronics & Melware, Eicon Networks"); 34MODULE_AUTHOR("Cytronics & Melware, Eicon Networks");
37MODULE_SUPPORTED_DEVICE("Eicon diva drivers"); 35MODULE_SUPPORTED_DEVICE("Eicon diva drivers");
@@ -89,7 +87,7 @@ proc_read(char *page, char **start, off_t off, int count, int *eof,
89 87
90static int DIVA_INIT_FUNCTION create_proc(void) 88static int DIVA_INIT_FUNCTION create_proc(void)
91{ 89{
92 proc_net_eicon = create_proc_entry(main_proc_dir, S_IFDIR, proc_net); 90 proc_net_eicon = proc_mkdir("net/eicon", NULL);
93 91
94 if (proc_net_eicon) { 92 if (proc_net_eicon) {
95 if ((proc_didd = 93 if ((proc_didd =
@@ -105,7 +103,7 @@ static int DIVA_INIT_FUNCTION create_proc(void)
105static void DIVA_EXIT_FUNCTION remove_proc(void) 103static void DIVA_EXIT_FUNCTION remove_proc(void)
106{ 104{
107 remove_proc_entry(DRIVERLNAME, proc_net_eicon); 105 remove_proc_entry(DRIVERLNAME, proc_net_eicon);
108 remove_proc_entry(main_proc_dir, proc_net); 106 remove_proc_entry("net/eicon", NULL);
109} 107}
110 108
111static int DIVA_INIT_FUNCTION divadidd_init(void) 109static int DIVA_INIT_FUNCTION divadidd_init(void)
diff --git a/drivers/isdn/hardware/eicon/divasproc.c b/drivers/isdn/hardware/eicon/divasproc.c
index b6435589d459..c12efa6f8429 100644
--- a/drivers/isdn/hardware/eicon/divasproc.c
+++ b/drivers/isdn/hardware/eicon/divasproc.c
@@ -381,7 +381,7 @@ int create_adapter_proc(diva_os_xdi_adapter_t * a)
381 char tmp[16]; 381 char tmp[16];
382 382
383 sprintf(tmp, "%s%d", adapter_dir_name, a->controller); 383 sprintf(tmp, "%s%d", adapter_dir_name, a->controller);
384 if (!(de = create_proc_entry(tmp, S_IFDIR, proc_net_eicon))) 384 if (!(de = proc_mkdir(tmp, proc_net_eicon)))
385 return (0); 385 return (0);
386 a->proc_adapter_dir = (void *) de; 386 a->proc_adapter_dir = (void *) de;
387 387
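
The ISDN hunks replace create_proc_entry(..., S_IFDIR, ...) with proc_mkdir(), the dedicated helper for procfs directories; in two of the drivers the names also switch to the "net/..." form so the entries hang off the procfs root instead of the proc_net pointer. A minimal module-style sketch of the resulting pattern, assuming the 2.6-era procfs API used here (create_proc_entry() has since been removed from mainline) and a hypothetical "net/example" directory:

/* Sketch only: "net/example", "status" and example_* are invented names. */
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>

static struct proc_dir_entry *example_dir;
static struct proc_dir_entry *example_file;

static int __init example_init(void)
{
        /* Directories get proc_mkdir(), not create_proc_entry(S_IFDIR). */
        example_dir = proc_mkdir("net/example", NULL);
        if (!example_dir)
                return -ENOMEM;

        example_file = create_proc_entry("status", S_IFREG | S_IRUGO,
                                         example_dir);
        if (!example_file) {
                remove_proc_entry("net/example", NULL);
                return -ENOMEM;
        }
        return 0;
}

static void __exit example_exit(void)
{
        remove_proc_entry("status", example_dir);
        remove_proc_entry("net/example", NULL);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");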
diff --git a/drivers/isdn/hisax/st5481_b.c b/drivers/isdn/hisax/st5481_b.c
index 0a2536d62402..657817a591fe 100644
--- a/drivers/isdn/hisax/st5481_b.c
+++ b/drivers/isdn/hisax/st5481_b.c
@@ -209,9 +209,7 @@ static void st5481B_mode(struct st5481_bcs *bcs, int mode)
209 bcs->mode = mode; 209 bcs->mode = mode;
210 210
211 // Cancel all USB transfers on this B channel 211 // Cancel all USB transfers on this B channel
212 b_out->urb[0]->transfer_flags |= URB_ASYNC_UNLINK;
213 usb_unlink_urb(b_out->urb[0]); 212 usb_unlink_urb(b_out->urb[0]);
214 b_out->urb[1]->transfer_flags |= URB_ASYNC_UNLINK;
215 usb_unlink_urb(b_out->urb[1]); 213 usb_unlink_urb(b_out->urb[1]);
216 b_out->busy = 0; 214 b_out->busy = 0;
217 215
diff --git a/drivers/isdn/hisax/st5481_usb.c b/drivers/isdn/hisax/st5481_usb.c
index ffd5b2d45552..89fbeb58485d 100644
--- a/drivers/isdn/hisax/st5481_usb.c
+++ b/drivers/isdn/hisax/st5481_usb.c
@@ -645,9 +645,7 @@ void st5481_in_mode(struct st5481_in *in, int mode)
645 645
646 in->mode = mode; 646 in->mode = mode;
647 647
648 in->urb[0]->transfer_flags |= URB_ASYNC_UNLINK;
649 usb_unlink_urb(in->urb[0]); 648 usb_unlink_urb(in->urb[0]);
650 in->urb[1]->transfer_flags |= URB_ASYNC_UNLINK;
651 usb_unlink_urb(in->urb[1]); 649 usb_unlink_urb(in->urb[1]);
652 650
653 if (in->mode != L1_MODE_NULL) { 651 if (in->mode != L1_MODE_NULL) {
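
The st5481 hunks drop the URB_ASYNC_UNLINK flag: around this point usb_unlink_urb() became asynchronous by default, and drivers that need to wait for the URB to finish call usb_kill_urb() instead. A hedged sketch of the two choices in a driver's stop path; the priv structure and example_stop() are invented, only usb_unlink_urb() and usb_kill_urb() are real USB core calls:

#include <linux/usb.h>

struct example_priv {
        struct urb *urb[2];     /* hypothetical pair of in-flight URBs */
};

static void example_stop(struct example_priv *priv, int can_sleep)
{
        int i;

        for (i = 0; i < 2; i++) {
                if (can_sleep)
                        usb_kill_urb(priv->urb[i]);     /* waits for completion */
                else
                        usb_unlink_urb(priv->urb[i]);   /* asynchronous cancel */
        }
}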
diff --git a/drivers/isdn/hysdn/hysdn_procconf.c b/drivers/isdn/hysdn/hysdn_procconf.c
index 5da507e532fc..639582f61f41 100644
--- a/drivers/isdn/hysdn/hysdn_procconf.c
+++ b/drivers/isdn/hysdn/hysdn_procconf.c
@@ -394,7 +394,7 @@ hysdn_procconf_init(void)
394 hysdn_card *card; 394 hysdn_card *card;
395 uchar conf_name[20]; 395 uchar conf_name[20];
396 396
397 hysdn_proc_entry = create_proc_entry(PROC_SUBDIR_NAME, S_IFDIR | S_IRUGO | S_IXUGO, proc_net); 397 hysdn_proc_entry = proc_mkdir(PROC_SUBDIR_NAME, proc_net);
398 if (!hysdn_proc_entry) { 398 if (!hysdn_proc_entry) {
399 printk(KERN_ERR "HYSDN: unable to create hysdn subdir\n"); 399 printk(KERN_ERR "HYSDN: unable to create hysdn subdir\n");
400 return (-1); 400 return (-1);
diff --git a/drivers/macintosh/smu.c b/drivers/macintosh/smu.c
index fb535737d17d..9b38674fbf75 100644
--- a/drivers/macintosh/smu.c
+++ b/drivers/macintosh/smu.c
@@ -8,21 +8,15 @@
8 */ 8 */
9 9
10/* 10/*
11 * For now, this driver includes:
12 * - RTC get & set
13 * - reboot & shutdown commands
14 * all synchronous with IRQ disabled (ugh)
15 *
16 * TODO: 11 * TODO:
17 * rework in a way the PMU driver works, that is asynchronous 12 * - maybe add timeout to commands ?
18 * with a queue of commands. I'll do that as soon as I have an 13 * - blocking version of time functions
19 * SMU based machine at hand. Some more cleanup is needed too, 14 * - polling version of i2c commands (including timer that works with
20 * like maybe fitting it into a platform device, etc... 15 * interrutps off)
21 * Also check what's up with cache coherency, and if we really 16 * - maybe avoid some data copies with i2c by directly using the smu cmd
22 * can't do better than flushing the cache, maybe build a table 17 * buffer and a lower level internal interface
23 * of command len/reply len like the PMU driver to only flush 18 * - understand SMU -> CPU events and implement reception of them via
24 * what is actually necessary. 19 * the userland interface
25 * --BenH.
26 */ 20 */
27 21
28#include <linux/config.h> 22#include <linux/config.h>
@@ -36,6 +30,11 @@
36#include <linux/jiffies.h> 30#include <linux/jiffies.h>
37#include <linux/interrupt.h> 31#include <linux/interrupt.h>
38#include <linux/rtc.h> 32#include <linux/rtc.h>
33#include <linux/completion.h>
34#include <linux/miscdevice.h>
35#include <linux/delay.h>
36#include <linux/sysdev.h>
37#include <linux/poll.h>
39 38
40#include <asm/byteorder.h> 39#include <asm/byteorder.h>
41#include <asm/io.h> 40#include <asm/io.h>
@@ -45,8 +44,13 @@
45#include <asm/smu.h> 44#include <asm/smu.h>
46#include <asm/sections.h> 45#include <asm/sections.h>
47#include <asm/abs_addr.h> 46#include <asm/abs_addr.h>
47#include <asm/uaccess.h>
48#include <asm/of_device.h>
49
50#define VERSION "0.6"
51#define AUTHOR "(c) 2005 Benjamin Herrenschmidt, IBM Corp."
48 52
49#define DEBUG_SMU 1 53#undef DEBUG_SMU
50 54
51#ifdef DEBUG_SMU 55#ifdef DEBUG_SMU
52#define DPRINTK(fmt, args...) do { printk(KERN_DEBUG fmt , ##args); } while (0) 56#define DPRINTK(fmt, args...) do { printk(KERN_DEBUG fmt , ##args); } while (0)
@@ -57,20 +61,30 @@
57/* 61/*
58 * This is the command buffer passed to the SMU hardware 62 * This is the command buffer passed to the SMU hardware
59 */ 63 */
64#define SMU_MAX_DATA 254
65
60struct smu_cmd_buf { 66struct smu_cmd_buf {
61 u8 cmd; 67 u8 cmd;
62 u8 length; 68 u8 length;
63 u8 data[0x0FFE]; 69 u8 data[SMU_MAX_DATA];
64}; 70};
65 71
66struct smu_device { 72struct smu_device {
67 spinlock_t lock; 73 spinlock_t lock;
68 struct device_node *of_node; 74 struct device_node *of_node;
69 int db_ack; /* doorbell ack GPIO */ 75 struct of_device *of_dev;
70 int db_req; /* doorbell req GPIO */ 76 int doorbell; /* doorbell gpio */
71 u32 __iomem *db_buf; /* doorbell buffer */ 77 u32 __iomem *db_buf; /* doorbell buffer */
78 int db_irq;
79 int msg;
80 int msg_irq;
72 struct smu_cmd_buf *cmd_buf; /* command buffer virtual */ 81 struct smu_cmd_buf *cmd_buf; /* command buffer virtual */
73 u32 cmd_buf_abs; /* command buffer absolute */ 82 u32 cmd_buf_abs; /* command buffer absolute */
83 struct list_head cmd_list;
84 struct smu_cmd *cmd_cur; /* pending command */
85 struct list_head cmd_i2c_list;
86 struct smu_i2c_cmd *cmd_i2c_cur; /* pending i2c command */
87 struct timer_list i2c_timer;
74}; 88};
75 89
76/* 90/*
@@ -79,113 +93,245 @@ struct smu_device {
79 */ 93 */
80static struct smu_device *smu; 94static struct smu_device *smu;
81 95
96
82/* 97/*
83 * SMU low level communication stuff 98 * SMU driver low level stuff
84 */ 99 */
85static inline int smu_cmd_stat(struct smu_cmd_buf *cmd_buf, u8 cmd_ack)
86{
87 rmb();
88 return cmd_buf->cmd == cmd_ack && cmd_buf->length != 0;
89}
90 100
91static inline u8 smu_save_ack_cmd(struct smu_cmd_buf *cmd_buf) 101static void smu_start_cmd(void)
92{ 102{
93 return (~cmd_buf->cmd) & 0xff; 103 unsigned long faddr, fend;
94} 104 struct smu_cmd *cmd;
95 105
96static void smu_send_cmd(struct smu_device *dev) 106 if (list_empty(&smu->cmd_list))
97{ 107 return;
98 /* SMU command buf is currently cacheable, we need a physical 108
99 * address. This isn't exactly a DMA mapping here, I suspect 109 /* Fetch first command in queue */
110 cmd = list_entry(smu->cmd_list.next, struct smu_cmd, link);
111 smu->cmd_cur = cmd;
112 list_del(&cmd->link);
113
114 DPRINTK("SMU: starting cmd %x, %d bytes data\n", cmd->cmd,
115 cmd->data_len);
116 DPRINTK("SMU: data buffer: %02x %02x %02x %02x ...\n",
117 ((u8 *)cmd->data_buf)[0], ((u8 *)cmd->data_buf)[1],
118 ((u8 *)cmd->data_buf)[2], ((u8 *)cmd->data_buf)[3]);
119
120 /* Fill the SMU command buffer */
121 smu->cmd_buf->cmd = cmd->cmd;
122 smu->cmd_buf->length = cmd->data_len;
123 memcpy(smu->cmd_buf->data, cmd->data_buf, cmd->data_len);
124
125 /* Flush command and data to RAM */
126 faddr = (unsigned long)smu->cmd_buf;
127 fend = faddr + smu->cmd_buf->length + 2;
128 flush_inval_dcache_range(faddr, fend);
129
130 /* This isn't exactly a DMA mapping here, I suspect
100 * the SMU is actually communicating with us via i2c to the 131 * the SMU is actually communicating with us via i2c to the
101 * northbridge or the CPU to access RAM. 132 * northbridge or the CPU to access RAM.
102 */ 133 */
103 writel(dev->cmd_buf_abs, dev->db_buf); 134 writel(smu->cmd_buf_abs, smu->db_buf);
104 135
105 /* Ring the SMU doorbell */ 136 /* Ring the SMU doorbell */
106 pmac_do_feature_call(PMAC_FTR_WRITE_GPIO, NULL, dev->db_req, 4); 137 pmac_do_feature_call(PMAC_FTR_WRITE_GPIO, NULL, smu->doorbell, 4);
107 pmac_do_feature_call(PMAC_FTR_READ_GPIO, NULL, dev->db_req, 4);
108} 138}
109 139
110static int smu_cmd_done(struct smu_device *dev) 140
141static irqreturn_t smu_db_intr(int irq, void *arg, struct pt_regs *regs)
111{ 142{
112 unsigned long wait = 0; 143 unsigned long flags;
113 int gpio; 144 struct smu_cmd *cmd;
145 void (*done)(struct smu_cmd *cmd, void *misc) = NULL;
146 void *misc = NULL;
147 u8 gpio;
148 int rc = 0;
114 149
115 /* Check the SMU doorbell */ 150 /* SMU completed the command, well, we hope, let's make sure
116 do { 151 * of it
117 gpio = pmac_do_feature_call(PMAC_FTR_READ_GPIO, 152 */
118 NULL, dev->db_ack); 153 spin_lock_irqsave(&smu->lock, flags);
119 if ((gpio & 7) == 7)
120 return 0;
121 udelay(100);
122 } while(++wait < 10000);
123 154
124 printk(KERN_ERR "SMU timeout !\n"); 155 gpio = pmac_do_feature_call(PMAC_FTR_READ_GPIO, NULL, smu->doorbell);
125 return -ENXIO; 156 if ((gpio & 7) != 7) {
157 spin_unlock_irqrestore(&smu->lock, flags);
158 return IRQ_HANDLED;
159 }
160
161 cmd = smu->cmd_cur;
162 smu->cmd_cur = NULL;
163 if (cmd == NULL)
164 goto bail;
165
166 if (rc == 0) {
167 unsigned long faddr;
168 int reply_len;
169 u8 ack;
170
171 /* CPU might have brought back the cache line, so we need
172 * to flush again before peeking at the SMU response. We
173 * flush the entire buffer for now as we haven't read the
 174		 * reply length (it's only 2 cache lines anyway)
175 */
176 faddr = (unsigned long)smu->cmd_buf;
177 flush_inval_dcache_range(faddr, faddr + 256);
178
179 /* Now check ack */
180 ack = (~cmd->cmd) & 0xff;
181 if (ack != smu->cmd_buf->cmd) {
182 DPRINTK("SMU: incorrect ack, want %x got %x\n",
183 ack, smu->cmd_buf->cmd);
184 rc = -EIO;
185 }
186 reply_len = rc == 0 ? smu->cmd_buf->length : 0;
187 DPRINTK("SMU: reply len: %d\n", reply_len);
188 if (reply_len > cmd->reply_len) {
189 printk(KERN_WARNING "SMU: reply buffer too small,"
190 "got %d bytes for a %d bytes buffer\n",
191 reply_len, cmd->reply_len);
192 reply_len = cmd->reply_len;
193 }
194 cmd->reply_len = reply_len;
195 if (cmd->reply_buf && reply_len)
196 memcpy(cmd->reply_buf, smu->cmd_buf->data, reply_len);
197 }
198
 199	/* Now complete the command. Write status last in order as we lose
200 * ownership of the command structure as soon as it's no longer -1
201 */
202 done = cmd->done;
203 misc = cmd->misc;
204 mb();
205 cmd->status = rc;
206 bail:
207 /* Start next command if any */
208 smu_start_cmd();
209 spin_unlock_irqrestore(&smu->lock, flags);
210
211 /* Call command completion handler if any */
212 if (done)
213 done(cmd, misc);
214
215 /* It's an edge interrupt, nothing to do */
216 return IRQ_HANDLED;
126} 217}
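(Illustrative aside, not part of the patch: the SMU acknowledges a command by writing back the bitwise complement of the command byte, which is what the ack = (~cmd->cmd) & 0xff comparison above checks; an RTC command byte of 0x8e, for example, is acknowledged as 0x71.)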
127 218
128static int smu_do_cmd(struct smu_device *dev) 219
220static irqreturn_t smu_msg_intr(int irq, void *arg, struct pt_regs *regs)
129{ 221{
130 int rc; 222 /* I don't quite know what to do with this one, we seem to never
131 u8 cmd_ack; 223 * receive it, so I suspect we have to arm it someway in the SMU
224 * to start getting events that way.
225 */
132 226
133 DPRINTK("SMU do_cmd %02x len=%d %02x\n", 227 printk(KERN_INFO "SMU: message interrupt !\n");
134 dev->cmd_buf->cmd, dev->cmd_buf->length,
135 dev->cmd_buf->data[0]);
136 228
137 cmd_ack = smu_save_ack_cmd(dev->cmd_buf); 229 /* It's an edge interrupt, nothing to do */
230 return IRQ_HANDLED;
231}
138 232
139 /* Clear cmd_buf cache lines */
140 flush_inval_dcache_range((unsigned long)dev->cmd_buf,
141 ((unsigned long)dev->cmd_buf) +
142 sizeof(struct smu_cmd_buf));
143 smu_send_cmd(dev);
144 rc = smu_cmd_done(dev);
145 if (rc == 0)
146 rc = smu_cmd_stat(dev->cmd_buf, cmd_ack) ? 0 : -1;
147 233
148 DPRINTK("SMU do_cmd %02x len=%d %02x => %d (%02x)\n", 234/*
149 dev->cmd_buf->cmd, dev->cmd_buf->length, 235 * Queued command management.
150 dev->cmd_buf->data[0], rc, cmd_ack); 236 *
237 */
238
239int smu_queue_cmd(struct smu_cmd *cmd)
240{
241 unsigned long flags;
151 242
152 return rc; 243 if (smu == NULL)
244 return -ENODEV;
245 if (cmd->data_len > SMU_MAX_DATA ||
246 cmd->reply_len > SMU_MAX_DATA)
247 return -EINVAL;
248
249 cmd->status = 1;
250 spin_lock_irqsave(&smu->lock, flags);
251 list_add_tail(&cmd->link, &smu->cmd_list);
252 if (smu->cmd_cur == NULL)
253 smu_start_cmd();
254 spin_unlock_irqrestore(&smu->lock, flags);
255
256 return 0;
153} 257}
258EXPORT_SYMBOL(smu_queue_cmd);
154 259
155/* RTC low level commands */ 260
156static inline int bcd2hex (int n) 261int smu_queue_simple(struct smu_simple_cmd *scmd, u8 command,
262 unsigned int data_len,
263 void (*done)(struct smu_cmd *cmd, void *misc),
264 void *misc, ...)
157{ 265{
158 return (((n & 0xf0) >> 4) * 10) + (n & 0xf); 266 struct smu_cmd *cmd = &scmd->cmd;
267 va_list list;
268 int i;
269
270 if (data_len > sizeof(scmd->buffer))
271 return -EINVAL;
272
273 memset(scmd, 0, sizeof(*scmd));
274 cmd->cmd = command;
275 cmd->data_len = data_len;
276 cmd->data_buf = scmd->buffer;
277 cmd->reply_len = sizeof(scmd->buffer);
278 cmd->reply_buf = scmd->buffer;
279 cmd->done = done;
280 cmd->misc = misc;
281
282 va_start(list, misc);
283 for (i = 0; i < data_len; ++i)
284 scmd->buffer[i] = (u8)va_arg(list, int);
285 va_end(list);
286
287 return smu_queue_cmd(cmd);
159} 288}
289EXPORT_SYMBOL(smu_queue_simple);
160 290
161static inline int hex2bcd (int n) 291
292void smu_poll(void)
162{ 293{
163 return ((n / 10) << 4) + (n % 10); 294 u8 gpio;
295
296 if (smu == NULL)
297 return;
298
299 gpio = pmac_do_feature_call(PMAC_FTR_READ_GPIO, NULL, smu->doorbell);
300 if ((gpio & 7) == 7)
301 smu_db_intr(smu->db_irq, smu, NULL);
164} 302}
303EXPORT_SYMBOL(smu_poll);
165 304
166#if 0 305
167static inline void smu_fill_set_pwrup_timer_cmd(struct smu_cmd_buf *cmd_buf) 306void smu_done_complete(struct smu_cmd *cmd, void *misc)
168{ 307{
169 cmd_buf->cmd = 0x8e; 308 struct completion *comp = misc;
170 cmd_buf->length = 8; 309
171 cmd_buf->data[0] = 0x00; 310 complete(comp);
172 memset(cmd_buf->data + 1, 0, 7);
173} 311}
312EXPORT_SYMBOL(smu_done_complete);
313
174 314
175static inline void smu_fill_get_pwrup_timer_cmd(struct smu_cmd_buf *cmd_buf) 315void smu_spinwait_cmd(struct smu_cmd *cmd)
176{ 316{
177 cmd_buf->cmd = 0x8e; 317 while(cmd->status == 1)
178 cmd_buf->length = 1; 318 smu_poll();
179 cmd_buf->data[0] = 0x01;
180} 319}
320EXPORT_SYMBOL(smu_spinwait_cmd);
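As an illustration of the queued API above (not part of the patch), a caller could pair smu_queue_simple() with smu_done_complete() and a completion to issue a command synchronously without spinning; the command byte 0xfa and its single argument byte below are hypothetical placeholders, not real SMU commands:

static int example_sync_command(u8 *reply, int reply_size)
{
	DECLARE_COMPLETION(comp);
	struct smu_simple_cmd scmd;
	int rc;

	if (!smu_present())
		return -ENODEV;

	/* Queue the hypothetical command 0xfa with one byte of argument;
	 * smu_done_complete() completes &comp from the doorbell interrupt
	 */
	rc = smu_queue_simple(&scmd, 0xfa, 1, smu_done_complete, &comp, 0x01);
	if (rc)
		return rc;
	wait_for_completion(&comp);

	/* The reply shares scmd.buffer; cmd.reply_len holds its real length */
	if (scmd.cmd.status == 0 && scmd.cmd.reply_len <= reply_size)
		memcpy(reply, scmd.buffer, scmd.cmd.reply_len);

	return scmd.cmd.status;
}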
321
181 322
182static inline void smu_fill_dis_pwrup_timer_cmd(struct smu_cmd_buf *cmd_buf) 323/* RTC low level commands */
324static inline int bcd2hex (int n)
183{ 325{
184 cmd_buf->cmd = 0x8e; 326 return (((n & 0xf0) >> 4) * 10) + (n & 0xf);
185 cmd_buf->length = 1;
186 cmd_buf->data[0] = 0x02;
187} 327}
188#endif 328
329
330static inline int hex2bcd (int n)
331{
332 return ((n / 10) << 4) + (n % 10);
333}
334
189 335
190static inline void smu_fill_set_rtc_cmd(struct smu_cmd_buf *cmd_buf, 336static inline void smu_fill_set_rtc_cmd(struct smu_cmd_buf *cmd_buf,
191 struct rtc_time *time) 337 struct rtc_time *time)
@@ -202,100 +348,96 @@ static inline void smu_fill_set_rtc_cmd(struct smu_cmd_buf *cmd_buf,
202 cmd_buf->data[7] = hex2bcd(time->tm_year - 100); 348 cmd_buf->data[7] = hex2bcd(time->tm_year - 100);
203} 349}
204 350
205static inline void smu_fill_get_rtc_cmd(struct smu_cmd_buf *cmd_buf)
206{
207 cmd_buf->cmd = 0x8e;
208 cmd_buf->length = 1;
209 cmd_buf->data[0] = 0x81;
210}
211 351
212static void smu_parse_get_rtc_reply(struct smu_cmd_buf *cmd_buf, 352int smu_get_rtc_time(struct rtc_time *time, int spinwait)
213 struct rtc_time *time)
214{ 353{
215 time->tm_sec = bcd2hex(cmd_buf->data[0]); 354 struct smu_simple_cmd cmd;
216 time->tm_min = bcd2hex(cmd_buf->data[1]);
217 time->tm_hour = bcd2hex(cmd_buf->data[2]);
218 time->tm_wday = bcd2hex(cmd_buf->data[3]);
219 time->tm_mday = bcd2hex(cmd_buf->data[4]);
220 time->tm_mon = bcd2hex(cmd_buf->data[5]) - 1;
221 time->tm_year = bcd2hex(cmd_buf->data[6]) + 100;
222}
223
224int smu_get_rtc_time(struct rtc_time *time)
225{
226 unsigned long flags;
227 int rc; 355 int rc;
228 356
229 if (smu == NULL) 357 if (smu == NULL)
230 return -ENODEV; 358 return -ENODEV;
231 359
232 memset(time, 0, sizeof(struct rtc_time)); 360 memset(time, 0, sizeof(struct rtc_time));
233 spin_lock_irqsave(&smu->lock, flags); 361 rc = smu_queue_simple(&cmd, SMU_CMD_RTC_COMMAND, 1, NULL, NULL,
234 smu_fill_get_rtc_cmd(smu->cmd_buf); 362 SMU_CMD_RTC_GET_DATETIME);
235 rc = smu_do_cmd(smu); 363 if (rc)
236 if (rc == 0) 364 return rc;
237 smu_parse_get_rtc_reply(smu->cmd_buf, time); 365 smu_spinwait_simple(&cmd);
238 spin_unlock_irqrestore(&smu->lock, flags);
239 366
240 return rc; 367 time->tm_sec = bcd2hex(cmd.buffer[0]);
368 time->tm_min = bcd2hex(cmd.buffer[1]);
369 time->tm_hour = bcd2hex(cmd.buffer[2]);
370 time->tm_wday = bcd2hex(cmd.buffer[3]);
371 time->tm_mday = bcd2hex(cmd.buffer[4]);
372 time->tm_mon = bcd2hex(cmd.buffer[5]) - 1;
373 time->tm_year = bcd2hex(cmd.buffer[6]) + 100;
374
375 return 0;
241} 376}
242 377
243int smu_set_rtc_time(struct rtc_time *time) 378
379int smu_set_rtc_time(struct rtc_time *time, int spinwait)
244{ 380{
245 unsigned long flags; 381 struct smu_simple_cmd cmd;
246 int rc; 382 int rc;
247 383
248 if (smu == NULL) 384 if (smu == NULL)
249 return -ENODEV; 385 return -ENODEV;
250 386
251 spin_lock_irqsave(&smu->lock, flags); 387 rc = smu_queue_simple(&cmd, SMU_CMD_RTC_COMMAND, 8, NULL, NULL,
252 smu_fill_set_rtc_cmd(smu->cmd_buf, time); 388 SMU_CMD_RTC_SET_DATETIME,
253 rc = smu_do_cmd(smu); 389 hex2bcd(time->tm_sec),
254 spin_unlock_irqrestore(&smu->lock, flags); 390 hex2bcd(time->tm_min),
391 hex2bcd(time->tm_hour),
392 time->tm_wday,
393 hex2bcd(time->tm_mday),
394 hex2bcd(time->tm_mon) + 1,
395 hex2bcd(time->tm_year - 100));
396 if (rc)
397 return rc;
398 smu_spinwait_simple(&cmd);
255 399
256 return rc; 400 return 0;
257} 401}
258 402
403
259void smu_shutdown(void) 404void smu_shutdown(void)
260{ 405{
261 const unsigned char *command = "SHUTDOWN"; 406 struct smu_simple_cmd cmd;
262 unsigned long flags;
263 407
264 if (smu == NULL) 408 if (smu == NULL)
265 return; 409 return;
266 410
267 spin_lock_irqsave(&smu->lock, flags); 411 if (smu_queue_simple(&cmd, SMU_CMD_POWER_COMMAND, 9, NULL, NULL,
268 smu->cmd_buf->cmd = 0xaa; 412 'S', 'H', 'U', 'T', 'D', 'O', 'W', 'N', 0))
269 smu->cmd_buf->length = strlen(command); 413 return;
270 strcpy(smu->cmd_buf->data, command); 414 smu_spinwait_simple(&cmd);
271 smu_do_cmd(smu);
272 for (;;) 415 for (;;)
273 ; 416 ;
274 spin_unlock_irqrestore(&smu->lock, flags);
275} 417}
276 418
419
277void smu_restart(void) 420void smu_restart(void)
278{ 421{
279 const unsigned char *command = "RESTART"; 422 struct smu_simple_cmd cmd;
280 unsigned long flags;
281 423
282 if (smu == NULL) 424 if (smu == NULL)
283 return; 425 return;
284 426
285 spin_lock_irqsave(&smu->lock, flags); 427 if (smu_queue_simple(&cmd, SMU_CMD_POWER_COMMAND, 8, NULL, NULL,
286 smu->cmd_buf->cmd = 0xaa; 428 'R', 'E', 'S', 'T', 'A', 'R', 'T', 0))
287 smu->cmd_buf->length = strlen(command); 429 return;
288 strcpy(smu->cmd_buf->data, command); 430 smu_spinwait_simple(&cmd);
289 smu_do_cmd(smu);
290 for (;;) 431 for (;;)
291 ; 432 ;
292 spin_unlock_irqrestore(&smu->lock, flags);
293} 433}
294 434
435
295int smu_present(void) 436int smu_present(void)
296{ 437{
297 return smu != NULL; 438 return smu != NULL;
298} 439}
440EXPORT_SYMBOL(smu_present);
299 441
300 442
301int smu_init (void) 443int smu_init (void)
@@ -307,6 +449,8 @@ int smu_init (void)
307 if (np == NULL) 449 if (np == NULL)
308 return -ENODEV; 450 return -ENODEV;
309 451
452 printk(KERN_INFO "SMU driver %s %s\n", VERSION, AUTHOR);
453
310 if (smu_cmdbuf_abs == 0) { 454 if (smu_cmdbuf_abs == 0) {
311 printk(KERN_ERR "SMU: Command buffer not allocated !\n"); 455 printk(KERN_ERR "SMU: Command buffer not allocated !\n");
312 return -EINVAL; 456 return -EINVAL;
@@ -318,7 +462,13 @@ int smu_init (void)
318 memset(smu, 0, sizeof(*smu)); 462 memset(smu, 0, sizeof(*smu));
319 463
320 spin_lock_init(&smu->lock); 464 spin_lock_init(&smu->lock);
465 INIT_LIST_HEAD(&smu->cmd_list);
466 INIT_LIST_HEAD(&smu->cmd_i2c_list);
321 smu->of_node = np; 467 smu->of_node = np;
468 smu->db_irq = NO_IRQ;
469 smu->msg_irq = NO_IRQ;
470 init_timer(&smu->i2c_timer);
471
322 /* smu_cmdbuf_abs is in the low 2G of RAM, can be converted to a 472 /* smu_cmdbuf_abs is in the low 2G of RAM, can be converted to a
323 * 32 bits value safely 473 * 32 bits value safely
324 */ 474 */
@@ -331,8 +481,8 @@ int smu_init (void)
331 goto fail; 481 goto fail;
332 } 482 }
333 data = (u32 *)get_property(np, "reg", NULL); 483 data = (u32 *)get_property(np, "reg", NULL);
334 of_node_put(np);
335 if (data == NULL) { 484 if (data == NULL) {
485 of_node_put(np);
336 printk(KERN_ERR "SMU: Can't find doorbell GPIO address !\n"); 486 printk(KERN_ERR "SMU: Can't find doorbell GPIO address !\n");
337 goto fail; 487 goto fail;
338 } 488 }
@@ -341,8 +491,31 @@ int smu_init (void)
341 * and ack. GPIOs are at 0x50, best would be to find that out 491 * and ack. GPIOs are at 0x50, best would be to find that out
342 * in the device-tree though. 492 * in the device-tree though.
343 */ 493 */
344 smu->db_req = 0x50 + *data; 494 smu->doorbell = *data;
345 smu->db_ack = 0x50 + *data; 495 if (smu->doorbell < 0x50)
496 smu->doorbell += 0x50;
497 if (np->n_intrs > 0)
498 smu->db_irq = np->intrs[0].line;
499
500 of_node_put(np);
501
502 /* Now look for the smu-interrupt GPIO */
503 do {
504 np = of_find_node_by_name(NULL, "smu-interrupt");
505 if (np == NULL)
506 break;
507 data = (u32 *)get_property(np, "reg", NULL);
508 if (data == NULL) {
509 of_node_put(np);
510 break;
511 }
512 smu->msg = *data;
513 if (smu->msg < 0x50)
514 smu->msg += 0x50;
515 if (np->n_intrs > 0)
516 smu->msg_irq = np->intrs[0].line;
517 of_node_put(np);
518 } while(0);
346 519
347 /* Doorbell buffer is currently hard-coded, I didn't find a proper 520 /* Doorbell buffer is currently hard-coded, I didn't find a proper
348 * device-tree entry giving the address. Best would probably be to use 521
@@ -362,3 +535,584 @@ int smu_init (void)
362 return -ENXIO; 535 return -ENXIO;
363 536
364} 537}
538
539
540static int smu_late_init(void)
541{
542 if (!smu)
543 return 0;
544
545 /*
546 * Try to request the interrupts
547 */
548
549 if (smu->db_irq != NO_IRQ) {
550 if (request_irq(smu->db_irq, smu_db_intr,
551 SA_SHIRQ, "SMU doorbell", smu) < 0) {
552 printk(KERN_WARNING "SMU: can't "
553 "request interrupt %d\n",
554 smu->db_irq);
555 smu->db_irq = NO_IRQ;
556 }
557 }
558
559 if (smu->msg_irq != NO_IRQ) {
560 if (request_irq(smu->msg_irq, smu_msg_intr,
561 SA_SHIRQ, "SMU message", smu) < 0) {
562 printk(KERN_WARNING "SMU: can't "
563 "request interrupt %d\n",
564 smu->msg_irq);
565 smu->msg_irq = NO_IRQ;
566 }
567 }
568
569 return 0;
570}
571arch_initcall(smu_late_init);
572
573/*
574 * sysfs visibility
575 */
576
577static void smu_expose_childs(void *unused)
578{
579 struct device_node *np;
580
581 for (np = NULL; (np = of_get_next_child(smu->of_node, np)) != NULL;) {
582 if (device_is_compatible(np, "smu-i2c")) {
583 char name[32];
584 u32 *reg = (u32 *)get_property(np, "reg", NULL);
585
586 if (reg == NULL)
587 continue;
588 sprintf(name, "smu-i2c-%02x", *reg);
589 of_platform_device_create(np, name, &smu->of_dev->dev);
590 }
591 }
592
593}
594
595static DECLARE_WORK(smu_expose_childs_work, smu_expose_childs, NULL);
596
597static int smu_platform_probe(struct of_device* dev,
598 const struct of_device_id *match)
599{
600 if (!smu)
601 return -ENODEV;
602 smu->of_dev = dev;
603
604 /*
605 * Ok, we are matched, now expose all i2c busses. We have to defer
606 * that unfortunately or it would deadlock inside the device model
607 */
608 schedule_work(&smu_expose_childs_work);
609
610 return 0;
611}
612
613static struct of_device_id smu_platform_match[] =
614{
615 {
616 .type = "smu",
617 },
618 {},
619};
620
621static struct of_platform_driver smu_of_platform_driver =
622{
623 .name = "smu",
624 .match_table = smu_platform_match,
625 .probe = smu_platform_probe,
626};
627
628static int __init smu_init_sysfs(void)
629{
630 int rc;
631
632 /*
633 * Due to sysfs bogosity, a sysdev is not a real device, so
634 * we should in fact create both if we want sysdev semantics
635 * for power management.
636 * For now, we don't power manage machines with an SMU chip,
637 * I'm a bit too far from figuring out how that works with those
638 * new chipsets, but that will come back and bite us
639 */
640 rc = of_register_driver(&smu_of_platform_driver);
641 return 0;
642}
643
644device_initcall(smu_init_sysfs);
645
646struct of_device *smu_get_ofdev(void)
647{
648 if (!smu)
649 return NULL;
650 return smu->of_dev;
651}
652
653EXPORT_SYMBOL_GPL(smu_get_ofdev);
654
655/*
656 * i2c interface
657 */
658
659static void smu_i2c_complete_command(struct smu_i2c_cmd *cmd, int fail)
660{
661 void (*done)(struct smu_i2c_cmd *cmd, void *misc) = cmd->done;
662 void *misc = cmd->misc;
663 unsigned long flags;
664
665 /* Check for read case */
666 if (!fail && cmd->read) {
667 if (cmd->pdata[0] < 1)
668 fail = 1;
669 else
670 memcpy(cmd->info.data, &cmd->pdata[1],
671 cmd->info.datalen);
672 }
673
674 DPRINTK("SMU: completing, success: %d\n", !fail);
675
676 /* Update status and mark no pending i2c command with lock
 677	 * held so nobody comes in while we dequeue a possibly
678 * pending next i2c command
679 */
680 spin_lock_irqsave(&smu->lock, flags);
681 smu->cmd_i2c_cur = NULL;
682 wmb();
683 cmd->status = fail ? -EIO : 0;
684
685 /* Is there another i2c command waiting ? */
686 if (!list_empty(&smu->cmd_i2c_list)) {
687 struct smu_i2c_cmd *newcmd;
688
689 /* Fetch it, new current, remove from list */
690 newcmd = list_entry(smu->cmd_i2c_list.next,
691 struct smu_i2c_cmd, link);
692 smu->cmd_i2c_cur = newcmd;
693 list_del(&cmd->link);
694
695 /* Queue with low level smu */
696 list_add_tail(&cmd->scmd.link, &smu->cmd_list);
697 if (smu->cmd_cur == NULL)
698 smu_start_cmd();
699 }
700 spin_unlock_irqrestore(&smu->lock, flags);
701
702 /* Call command completion handler if any */
703 if (done)
704 done(cmd, misc);
705
706}
707
708
709static void smu_i2c_retry(unsigned long data)
710{
711 struct smu_i2c_cmd *cmd = (struct smu_i2c_cmd *)data;
712
713 DPRINTK("SMU: i2c failure, requeuing...\n");
714
715 /* requeue command simply by resetting reply_len */
716 cmd->pdata[0] = 0xff;
717 cmd->scmd.reply_len = 0x10;
718 smu_queue_cmd(&cmd->scmd);
719}
720
721
722static void smu_i2c_low_completion(struct smu_cmd *scmd, void *misc)
723{
724 struct smu_i2c_cmd *cmd = misc;
725 int fail = 0;
726
727 DPRINTK("SMU: i2c compl. stage=%d status=%x pdata[0]=%x rlen: %x\n",
728 cmd->stage, scmd->status, cmd->pdata[0], scmd->reply_len);
729
730 /* Check for possible status */
731 if (scmd->status < 0)
732 fail = 1;
733 else if (cmd->read) {
734 if (cmd->stage == 0)
735 fail = cmd->pdata[0] != 0;
736 else
737 fail = cmd->pdata[0] >= 0x80;
738 } else {
739 fail = cmd->pdata[0] != 0;
740 }
741
742 /* Handle failures by requeuing command, after 5ms interval
743 */
744 if (fail && --cmd->retries > 0) {
745 DPRINTK("SMU: i2c failure, starting timer...\n");
746 smu->i2c_timer.function = smu_i2c_retry;
747 smu->i2c_timer.data = (unsigned long)cmd;
748 smu->i2c_timer.expires = jiffies + msecs_to_jiffies(5);
749 add_timer(&smu->i2c_timer);
750 return;
751 }
752
753 /* If failure or stage 1, command is complete */
754 if (fail || cmd->stage != 0) {
755 smu_i2c_complete_command(cmd, fail);
756 return;
757 }
758
759 DPRINTK("SMU: going to stage 1\n");
760
761 /* Ok, initial command complete, now poll status */
762 scmd->reply_buf = cmd->pdata;
763 scmd->reply_len = 0x10;
764 scmd->data_buf = cmd->pdata;
765 scmd->data_len = 1;
766 cmd->pdata[0] = 0;
767 cmd->stage = 1;
768 cmd->retries = 20;
769 smu_queue_cmd(scmd);
770}
771
772
773int smu_queue_i2c(struct smu_i2c_cmd *cmd)
774{
775 unsigned long flags;
776
777 if (smu == NULL)
778 return -ENODEV;
779
780 /* Fill most fields of scmd */
781 cmd->scmd.cmd = SMU_CMD_I2C_COMMAND;
782 cmd->scmd.done = smu_i2c_low_completion;
783 cmd->scmd.misc = cmd;
784 cmd->scmd.reply_buf = cmd->pdata;
785 cmd->scmd.reply_len = 0x10;
786 cmd->scmd.data_buf = (u8 *)(char *)&cmd->info;
787 cmd->scmd.status = 1;
788 cmd->stage = 0;
789 cmd->pdata[0] = 0xff;
790 cmd->retries = 20;
791 cmd->status = 1;
792
793 /* Check transfer type, sanitize some "info" fields
794 * based on transfer type and do more checking
795 */
796 cmd->info.caddr = cmd->info.devaddr;
797 cmd->read = cmd->info.devaddr & 0x01;
798 switch(cmd->info.type) {
799 case SMU_I2C_TRANSFER_SIMPLE:
800 memset(&cmd->info.sublen, 0, 4);
801 break;
802 case SMU_I2C_TRANSFER_COMBINED:
803 cmd->info.devaddr &= 0xfe;
804 case SMU_I2C_TRANSFER_STDSUB:
805 if (cmd->info.sublen > 3)
806 return -EINVAL;
807 break;
808 default:
809 return -EINVAL;
810 }
811
812 /* Finish setting up command based on transfer direction
813 */
814 if (cmd->read) {
815 if (cmd->info.datalen > SMU_I2C_READ_MAX)
816 return -EINVAL;
817 memset(cmd->info.data, 0xff, cmd->info.datalen);
818 cmd->scmd.data_len = 9;
819 } else {
820 if (cmd->info.datalen > SMU_I2C_WRITE_MAX)
821 return -EINVAL;
822 cmd->scmd.data_len = 9 + cmd->info.datalen;
823 }
824
825 DPRINTK("SMU: i2c enqueuing command\n");
826 DPRINTK("SMU: %s, len=%d bus=%x addr=%x sub0=%x type=%x\n",
827 cmd->read ? "read" : "write", cmd->info.datalen,
828 cmd->info.bus, cmd->info.caddr,
829 cmd->info.subaddr[0], cmd->info.type);
830
831
832 /* Enqueue command in i2c list, and if empty, enqueue also in
833 * main command list
834 */
835 spin_lock_irqsave(&smu->lock, flags);
836 if (smu->cmd_i2c_cur == NULL) {
837 smu->cmd_i2c_cur = cmd;
838 list_add_tail(&cmd->scmd.link, &smu->cmd_list);
839 if (smu->cmd_cur == NULL)
840 smu_start_cmd();
841 } else
842 list_add_tail(&cmd->link, &smu->cmd_i2c_list);
843 spin_unlock_irqrestore(&smu->lock, flags);
844
845 return 0;
846}
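To illustrate the i2c queueing path above (again not part of the patch), a caller might fill a struct smu_i2c_cmd for a combined-mode register read roughly as follows; the bus, device address and sub-address values are placeholders, and the completion callback is a local helper because smu_done_complete() takes a struct smu_cmd rather than a struct smu_i2c_cmd:

static void example_i2c_done(struct smu_i2c_cmd *cmd, void *misc)
{
	complete((struct completion *)misc);
}

static int example_i2c_read_byte(u8 bus, u8 devaddr, u8 subaddr, u8 *val)
{
	DECLARE_COMPLETION(comp);
	struct smu_i2c_cmd cmd;
	int rc;

	memset(&cmd, 0, sizeof(cmd));
	cmd.info.bus = bus;
	cmd.info.type = SMU_I2C_TRANSFER_COMBINED;
	cmd.info.devaddr = devaddr | 0x01;	/* low bit set selects a read */
	cmd.info.sublen = 1;
	cmd.info.subaddr[0] = subaddr;
	cmd.info.datalen = 1;
	cmd.done = example_i2c_done;
	cmd.misc = &comp;

	rc = smu_queue_i2c(&cmd);
	if (rc)
		return rc;
	wait_for_completion(&comp);

	/* On success smu_i2c_complete_command() copied the reply into info.data */
	if (cmd.status == 0)
		*val = cmd.info.data[0];
	return cmd.status;
}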
847
848
849
850/*
851 * Userland driver interface
852 */
853
854
855static LIST_HEAD(smu_clist);
856static DEFINE_SPINLOCK(smu_clist_lock);
857
858enum smu_file_mode {
859 smu_file_commands,
860 smu_file_events,
861 smu_file_closing
862};
863
864struct smu_private
865{
866 struct list_head list;
867 enum smu_file_mode mode;
868 int busy;
869 struct smu_cmd cmd;
870 spinlock_t lock;
871 wait_queue_head_t wait;
872 u8 buffer[SMU_MAX_DATA];
873};
874
875
876static int smu_open(struct inode *inode, struct file *file)
877{
878 struct smu_private *pp;
879 unsigned long flags;
880
881 pp = kmalloc(sizeof(struct smu_private), GFP_KERNEL);
882 if (pp == 0)
883 return -ENOMEM;
884 memset(pp, 0, sizeof(struct smu_private));
885 spin_lock_init(&pp->lock);
886 pp->mode = smu_file_commands;
887 init_waitqueue_head(&pp->wait);
888
889 spin_lock_irqsave(&smu_clist_lock, flags);
890 list_add(&pp->list, &smu_clist);
891 spin_unlock_irqrestore(&smu_clist_lock, flags);
892 file->private_data = pp;
893
894 return 0;
895}
896
897
898static void smu_user_cmd_done(struct smu_cmd *cmd, void *misc)
899{
900 struct smu_private *pp = misc;
901
902 wake_up_all(&pp->wait);
903}
904
905
906static ssize_t smu_write(struct file *file, const char __user *buf,
907 size_t count, loff_t *ppos)
908{
909 struct smu_private *pp = file->private_data;
910 unsigned long flags;
911 struct smu_user_cmd_hdr hdr;
912 int rc = 0;
913
914 if (pp->busy)
915 return -EBUSY;
916 else if (copy_from_user(&hdr, buf, sizeof(hdr)))
917 return -EFAULT;
918 else if (hdr.cmdtype == SMU_CMDTYPE_WANTS_EVENTS) {
919 pp->mode = smu_file_events;
920 return 0;
921 } else if (hdr.cmdtype != SMU_CMDTYPE_SMU)
922 return -EINVAL;
923 else if (pp->mode != smu_file_commands)
924 return -EBADFD;
925 else if (hdr.data_len > SMU_MAX_DATA)
926 return -EINVAL;
927
928 spin_lock_irqsave(&pp->lock, flags);
929 if (pp->busy) {
930 spin_unlock_irqrestore(&pp->lock, flags);
931 return -EBUSY;
932 }
933 pp->busy = 1;
934 pp->cmd.status = 1;
935 spin_unlock_irqrestore(&pp->lock, flags);
936
937 if (copy_from_user(pp->buffer, buf + sizeof(hdr), hdr.data_len)) {
938 pp->busy = 0;
939 return -EFAULT;
940 }
941
942 pp->cmd.cmd = hdr.cmd;
943 pp->cmd.data_len = hdr.data_len;
944 pp->cmd.reply_len = SMU_MAX_DATA;
945 pp->cmd.data_buf = pp->buffer;
946 pp->cmd.reply_buf = pp->buffer;
947 pp->cmd.done = smu_user_cmd_done;
948 pp->cmd.misc = pp;
949 rc = smu_queue_cmd(&pp->cmd);
950 if (rc < 0)
951 return rc;
952 return count;
953}
954
955
956static ssize_t smu_read_command(struct file *file, struct smu_private *pp,
957 char __user *buf, size_t count)
958{
959 DECLARE_WAITQUEUE(wait, current);
960 struct smu_user_reply_hdr hdr;
961 unsigned long flags;
962 int size, rc = 0;
963
964 if (!pp->busy)
965 return 0;
966 if (count < sizeof(struct smu_user_reply_hdr))
967 return -EOVERFLOW;
968 spin_lock_irqsave(&pp->lock, flags);
969 if (pp->cmd.status == 1) {
970 if (file->f_flags & O_NONBLOCK)
971 return -EAGAIN;
972 add_wait_queue(&pp->wait, &wait);
973 for (;;) {
974 set_current_state(TASK_INTERRUPTIBLE);
975 rc = 0;
976 if (pp->cmd.status != 1)
977 break;
978 rc = -ERESTARTSYS;
979 if (signal_pending(current))
980 break;
981 spin_unlock_irqrestore(&pp->lock, flags);
982 schedule();
983 spin_lock_irqsave(&pp->lock, flags);
984 }
985 set_current_state(TASK_RUNNING);
986 remove_wait_queue(&pp->wait, &wait);
987 }
988 spin_unlock_irqrestore(&pp->lock, flags);
989 if (rc)
990 return rc;
991 if (pp->cmd.status != 0)
992 pp->cmd.reply_len = 0;
993 size = sizeof(hdr) + pp->cmd.reply_len;
994 if (count < size)
995 size = count;
996 rc = size;
997 hdr.status = pp->cmd.status;
998 hdr.reply_len = pp->cmd.reply_len;
999 if (copy_to_user(buf, &hdr, sizeof(hdr)))
1000 return -EFAULT;
1001 size -= sizeof(hdr);
1002 if (size && copy_to_user(buf + sizeof(hdr), pp->buffer, size))
1003 return -EFAULT;
1004 pp->busy = 0;
1005
1006 return rc;
1007}
1008
1009
1010static ssize_t smu_read_events(struct file *file, struct smu_private *pp,
1011 char __user *buf, size_t count)
1012{
1013 /* Not implemented */
1014 msleep_interruptible(1000);
1015 return 0;
1016}
1017
1018
1019static ssize_t smu_read(struct file *file, char __user *buf,
1020 size_t count, loff_t *ppos)
1021{
1022 struct smu_private *pp = file->private_data;
1023
1024 if (pp->mode == smu_file_commands)
1025 return smu_read_command(file, pp, buf, count);
1026 if (pp->mode == smu_file_events)
1027 return smu_read_events(file, pp, buf, count);
1028
1029 return -EBADFD;
1030}
1031
1032static unsigned int smu_fpoll(struct file *file, poll_table *wait)
1033{
1034 struct smu_private *pp = file->private_data;
1035 unsigned int mask = 0;
1036 unsigned long flags;
1037
1038 if (pp == 0)
1039 return 0;
1040
1041 if (pp->mode == smu_file_commands) {
1042 poll_wait(file, &pp->wait, wait);
1043
1044 spin_lock_irqsave(&pp->lock, flags);
1045 if (pp->busy && pp->cmd.status != 1)
1046 mask |= POLLIN;
1047 spin_unlock_irqrestore(&pp->lock, flags);
1048 } if (pp->mode == smu_file_events) {
1049 /* Not yet implemented */
1050 }
1051 return mask;
1052}
1053
1054static int smu_release(struct inode *inode, struct file *file)
1055{
1056 struct smu_private *pp = file->private_data;
1057 unsigned long flags;
1058 unsigned int busy;
1059
1060 if (pp == 0)
1061 return 0;
1062
1063 file->private_data = NULL;
1064
1065 /* Mark file as closing to avoid races with new request */
1066 spin_lock_irqsave(&pp->lock, flags);
1067 pp->mode = smu_file_closing;
1068 busy = pp->busy;
1069
1070 /* Wait for any pending request to complete */
1071 if (busy && pp->cmd.status == 1) {
1072 DECLARE_WAITQUEUE(wait, current);
1073
1074 add_wait_queue(&pp->wait, &wait);
1075 for (;;) {
1076 set_current_state(TASK_UNINTERRUPTIBLE);
1077 if (pp->cmd.status != 1)
1078 break;
1079 spin_lock_irqsave(&pp->lock, flags);
1080 schedule();
1081 spin_unlock_irqrestore(&pp->lock, flags);
1082 }
1083 set_current_state(TASK_RUNNING);
1084 remove_wait_queue(&pp->wait, &wait);
1085 }
1086 spin_unlock_irqrestore(&pp->lock, flags);
1087
1088 spin_lock_irqsave(&smu_clist_lock, flags);
1089 list_del(&pp->list);
1090 spin_unlock_irqrestore(&smu_clist_lock, flags);
1091 kfree(pp);
1092
1093 return 0;
1094}
1095
1096
1097static struct file_operations smu_device_fops __pmacdata = {
1098 .llseek = no_llseek,
1099 .read = smu_read,
1100 .write = smu_write,
1101 .poll = smu_fpoll,
1102 .open = smu_open,
1103 .release = smu_release,
1104};
1105
1106static struct miscdevice pmu_device __pmacdata = {
1107 MISC_DYNAMIC_MINOR, "smu", &smu_device_fops
1108};
1109
1110static int smu_device_init(void)
1111{
1112 if (!smu)
1113 return -ENODEV;
1114 if (misc_register(&pmu_device) < 0)
1115 printk(KERN_ERR "via-pmu: cannot register misc device.\n");
1116 return 0;
1117}
1118device_initcall(smu_device_init);
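Finally, as an illustration of the character-device protocol implemented by smu_write()/smu_read() above (not part of the patch), a user-space client could drive it roughly as follows, assuming the misc device shows up as /dev/smu and that the smu_user_cmd_hdr/smu_user_reply_hdr layouts and SMU_* constants are available from <asm/smu.h>; the command byte 0xfa and its argument are hypothetical:

#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <asm/smu.h>		/* assumed to provide the user command headers */

int main(void)
{
	unsigned char wbuf[sizeof(struct smu_user_cmd_hdr) + 1];
	unsigned char rbuf[sizeof(struct smu_user_reply_hdr) + SMU_MAX_DATA];
	struct smu_user_cmd_hdr *hdr = (struct smu_user_cmd_hdr *)wbuf;
	struct smu_user_reply_hdr *rhdr = (struct smu_user_reply_hdr *)rbuf;
	int fd, n;

	fd = open("/dev/smu", O_RDWR);
	if (fd < 0)
		return 1;

	memset(wbuf, 0, sizeof(wbuf));
	hdr->cmdtype = SMU_CMDTYPE_SMU;
	hdr->cmd = 0xfa;			/* hypothetical command byte */
	hdr->data_len = 1;
	wbuf[sizeof(*hdr)] = 0x01;		/* one byte of command data */

	if (write(fd, wbuf, sizeof(*hdr) + 1) < 0)
		return 1;

	/* read() sleeps until the command completes, then returns a
	 * smu_user_reply_hdr followed by the reply data */
	n = read(fd, rbuf, sizeof(rbuf));
	if (n >= (int)sizeof(*rhdr))
		printf("status %d, %d reply bytes\n",
		       (int)rhdr->status, (int)rhdr->reply_len);

	close(fd);
	return 0;
}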
diff --git a/drivers/macintosh/therm_adt746x.c b/drivers/macintosh/therm_adt746x.c
index c9ca1118e449..f38696622eb4 100644
--- a/drivers/macintosh/therm_adt746x.c
+++ b/drivers/macintosh/therm_adt746x.c
@@ -599,7 +599,7 @@ thermostat_init(void)
599 sensor_location[2] = "?"; 599 sensor_location[2] = "?";
600 } 600 }
601 601
602 of_dev = of_platform_device_create(np, "temperatures"); 602 of_dev = of_platform_device_create(np, "temperatures", NULL);
603 603
604 if (of_dev == NULL) { 604 if (of_dev == NULL) {
605 printk(KERN_ERR "Can't register temperatures device !\n"); 605 printk(KERN_ERR "Can't register temperatures device !\n");
diff --git a/drivers/macintosh/therm_pm72.c b/drivers/macintosh/therm_pm72.c
index 703e31973314..cc507ceef153 100644
--- a/drivers/macintosh/therm_pm72.c
+++ b/drivers/macintosh/therm_pm72.c
@@ -2051,7 +2051,7 @@ static int __init therm_pm72_init(void)
2051 return -ENODEV; 2051 return -ENODEV;
2052 } 2052 }
2053 } 2053 }
2054 of_dev = of_platform_device_create(np, "temperature"); 2054 of_dev = of_platform_device_create(np, "temperature", NULL);
2055 if (of_dev == NULL) { 2055 if (of_dev == NULL) {
2056 printk(KERN_ERR "Can't register FCU platform device !\n"); 2056 printk(KERN_ERR "Can't register FCU platform device !\n");
2057 return -ENODEV; 2057 return -ENODEV;
diff --git a/drivers/macintosh/therm_windtunnel.c b/drivers/macintosh/therm_windtunnel.c
index cbb72eb0426d..6aaa1df1a64e 100644
--- a/drivers/macintosh/therm_windtunnel.c
+++ b/drivers/macintosh/therm_windtunnel.c
@@ -504,7 +504,7 @@ g4fan_init( void )
504 } 504 }
505 if( !(np=of_find_node_by_name(NULL, "fan")) ) 505 if( !(np=of_find_node_by_name(NULL, "fan")) )
506 return -ENODEV; 506 return -ENODEV;
507 x.of_dev = of_platform_device_create( np, "temperature" ); 507 x.of_dev = of_platform_device_create(np, "temperature", NULL);
508 of_node_put( np ); 508 of_node_put( np );
509 509
510 if( !x.of_dev ) { 510 if( !x.of_dev ) {
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index 200a0688f717..54ec737195e0 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -230,11 +230,20 @@ static int dm_hash_insert(const char *name, const char *uuid, struct mapped_devi
230 230
231static void __hash_remove(struct hash_cell *hc) 231static void __hash_remove(struct hash_cell *hc)
232{ 232{
233 struct dm_table *table;
234
233 /* remove from the dev hash */ 235 /* remove from the dev hash */
234 list_del(&hc->uuid_list); 236 list_del(&hc->uuid_list);
235 list_del(&hc->name_list); 237 list_del(&hc->name_list);
236 unregister_with_devfs(hc); 238 unregister_with_devfs(hc);
237 dm_set_mdptr(hc->md, NULL); 239 dm_set_mdptr(hc->md, NULL);
240
241 table = dm_get_table(hc->md);
242 if (table) {
243 dm_table_event(table);
244 dm_table_put(table);
245 }
246
238 dm_put(hc->md); 247 dm_put(hc->md);
239 if (hc->new_map) 248 if (hc->new_map)
240 dm_table_put(hc->new_map); 249 dm_table_put(hc->new_map);
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 785806bdb248..f9b7b32d5d5c 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -329,13 +329,17 @@ static int map_io(struct multipath *m, struct bio *bio, struct mpath_io *mpio,
329/* 329/*
330 * If we run out of usable paths, should we queue I/O or error it? 330 * If we run out of usable paths, should we queue I/O or error it?
331 */ 331 */
332static int queue_if_no_path(struct multipath *m, unsigned queue_if_no_path) 332static int queue_if_no_path(struct multipath *m, unsigned queue_if_no_path,
333 unsigned save_old_value)
333{ 334{
334 unsigned long flags; 335 unsigned long flags;
335 336
336 spin_lock_irqsave(&m->lock, flags); 337 spin_lock_irqsave(&m->lock, flags);
337 338
338 m->saved_queue_if_no_path = m->queue_if_no_path; 339 if (save_old_value)
340 m->saved_queue_if_no_path = m->queue_if_no_path;
341 else
342 m->saved_queue_if_no_path = queue_if_no_path;
339 m->queue_if_no_path = queue_if_no_path; 343 m->queue_if_no_path = queue_if_no_path;
340 if (!m->queue_if_no_path && m->queue_size) 344 if (!m->queue_if_no_path && m->queue_size)
341 queue_work(kmultipathd, &m->process_queued_ios); 345 queue_work(kmultipathd, &m->process_queued_ios);
@@ -677,7 +681,7 @@ static int parse_features(struct arg_set *as, struct multipath *m,
677 return 0; 681 return 0;
678 682
679 if (!strnicmp(shift(as), MESG_STR("queue_if_no_path"))) 683 if (!strnicmp(shift(as), MESG_STR("queue_if_no_path")))
680 return queue_if_no_path(m, 1); 684 return queue_if_no_path(m, 1, 0);
681 else { 685 else {
682 ti->error = "Unrecognised multipath feature request"; 686 ti->error = "Unrecognised multipath feature request";
683 return -EINVAL; 687 return -EINVAL;
@@ -1077,7 +1081,7 @@ static void multipath_presuspend(struct dm_target *ti)
1077{ 1081{
1078 struct multipath *m = (struct multipath *) ti->private; 1082 struct multipath *m = (struct multipath *) ti->private;
1079 1083
1080 queue_if_no_path(m, 0); 1084 queue_if_no_path(m, 0, 1);
1081} 1085}
1082 1086
1083/* 1087/*
@@ -1222,9 +1226,9 @@ static int multipath_message(struct dm_target *ti, unsigned argc, char **argv)
1222 1226
1223 if (argc == 1) { 1227 if (argc == 1) {
1224 if (!strnicmp(argv[0], MESG_STR("queue_if_no_path"))) 1228 if (!strnicmp(argv[0], MESG_STR("queue_if_no_path")))
1225 return queue_if_no_path(m, 1); 1229 return queue_if_no_path(m, 1, 0);
1226 else if (!strnicmp(argv[0], MESG_STR("fail_if_no_path"))) 1230 else if (!strnicmp(argv[0], MESG_STR("fail_if_no_path")))
1227 return queue_if_no_path(m, 0); 1231 return queue_if_no_path(m, 0, 0);
1228 } 1232 }
1229 1233
1230 if (argc != 2) 1234 if (argc != 2)
diff --git a/drivers/media/dvb/frontends/tda10021.c b/drivers/media/dvb/frontends/tda10021.c
index 87d5f4d8790f..eaf130e666d8 100644
--- a/drivers/media/dvb/frontends/tda10021.c
+++ b/drivers/media/dvb/frontends/tda10021.c
@@ -100,8 +100,8 @@ static u8 tda10021_readreg (struct tda10021_state* state, u8 reg)
100 100
101 ret = i2c_transfer (state->i2c, msg, 2); 101 ret = i2c_transfer (state->i2c, msg, 2);
102 if (ret != 2) 102 if (ret != 2)
103 printk("DVB: TDA10021(%d): %s: readreg error (ret == %i)\n", 103 printk("DVB: TDA10021: %s: readreg error (ret == %i)\n",
104 state->frontend.dvb->num, __FUNCTION__, ret); 104 __FUNCTION__, ret);
105 return b1[0]; 105 return b1[0];
106} 106}
107 107
diff --git a/drivers/media/video/bttv-cards.c b/drivers/media/video/bttv-cards.c
index 190977a1e549..6c332800d6ab 100644
--- a/drivers/media/video/bttv-cards.c
+++ b/drivers/media/video/bttv-cards.c
@@ -2398,7 +2398,7 @@ struct tvcard bttv_tvcards[] = {
2398 .svhs = 2, 2398 .svhs = 2,
2399 .muxsel = { 2, 3 }, 2399 .muxsel = { 2, 3 },
2400 .gpiomask = 0x00e00007, 2400 .gpiomask = 0x00e00007,
2401 .audiomux = { 0x00400005, 0, 0, 0, 0, 0 }, 2401 .audiomux = { 0x00400005, 0, 0x00000001, 0, 0x00c00007, 0 },
2402 .no_msp34xx = 1, 2402 .no_msp34xx = 1,
2403 .no_tda9875 = 1, 2403 .no_tda9875 = 1,
2404 .no_tda7432 = 1, 2404 .no_tda7432 = 1,
diff --git a/drivers/media/video/bttv-driver.c b/drivers/media/video/bttv-driver.c
index a564321db2f0..c062a017491e 100644
--- a/drivers/media/video/bttv-driver.c
+++ b/drivers/media/video/bttv-driver.c
@@ -763,21 +763,21 @@ static void set_pll(struct bttv *btv)
763 /* no PLL needed */ 763 /* no PLL needed */
764 if (btv->pll.pll_current == 0) 764 if (btv->pll.pll_current == 0)
765 return; 765 return;
766 vprintk(KERN_INFO "bttv%d: PLL can sleep, using XTAL (%d).\n", 766 bttv_printk(KERN_INFO "bttv%d: PLL can sleep, using XTAL (%d).\n",
767 btv->c.nr,btv->pll.pll_ifreq); 767 btv->c.nr,btv->pll.pll_ifreq);
768 btwrite(0x00,BT848_TGCTRL); 768 btwrite(0x00,BT848_TGCTRL);
769 btwrite(0x00,BT848_PLL_XCI); 769 btwrite(0x00,BT848_PLL_XCI);
770 btv->pll.pll_current = 0; 770 btv->pll.pll_current = 0;
771 return; 771 return;
772 } 772 }
773 773
774 vprintk(KERN_INFO "bttv%d: PLL: %d => %d ",btv->c.nr, 774 bttv_printk(KERN_INFO "bttv%d: PLL: %d => %d ",btv->c.nr,
775 btv->pll.pll_ifreq, btv->pll.pll_ofreq); 775 btv->pll.pll_ifreq, btv->pll.pll_ofreq);
776 set_pll_freq(btv, btv->pll.pll_ifreq, btv->pll.pll_ofreq); 776 set_pll_freq(btv, btv->pll.pll_ifreq, btv->pll.pll_ofreq);
777 777
778 for (i=0; i<10; i++) { 778 for (i=0; i<10; i++) {
779 /* Let other people run while the PLL stabilizes */ 779 /* Let other people run while the PLL stabilizes */
780 vprintk("."); 780 bttv_printk(".");
781 msleep(10); 781 msleep(10);
782 782
783 if (btread(BT848_DSTATUS) & BT848_DSTATUS_PLOCK) { 783 if (btread(BT848_DSTATUS) & BT848_DSTATUS_PLOCK) {
@@ -785,12 +785,12 @@ static void set_pll(struct bttv *btv)
785 } else { 785 } else {
786 btwrite(0x08,BT848_TGCTRL); 786 btwrite(0x08,BT848_TGCTRL);
787 btv->pll.pll_current = btv->pll.pll_ofreq; 787 btv->pll.pll_current = btv->pll.pll_ofreq;
788 vprintk(" ok\n"); 788 bttv_printk(" ok\n");
789 return; 789 return;
790 } 790 }
791 } 791 }
792 btv->pll.pll_current = -1; 792 btv->pll.pll_current = -1;
793 vprintk("failed\n"); 793 bttv_printk("failed\n");
794 return; 794 return;
795} 795}
796 796
diff --git a/drivers/media/video/bttvp.h b/drivers/media/video/bttvp.h
index 9b0b7ca035f8..7a312f79340a 100644
--- a/drivers/media/video/bttvp.h
+++ b/drivers/media/video/bttvp.h
@@ -221,7 +221,7 @@ extern void bttv_gpio_tracking(struct bttv *btv, char *comment);
221extern int init_bttv_i2c(struct bttv *btv); 221extern int init_bttv_i2c(struct bttv *btv);
222extern int fini_bttv_i2c(struct bttv *btv); 222extern int fini_bttv_i2c(struct bttv *btv);
223 223
224#define vprintk if (bttv_verbose) printk 224#define bttv_printk if (bttv_verbose) printk
225#define dprintk if (bttv_debug >= 1) printk 225#define dprintk if (bttv_debug >= 1) printk
226#define d2printk if (bttv_debug >= 2) printk 226#define d2printk if (bttv_debug >= 2) printk
227 227
diff --git a/drivers/media/video/cpia.c b/drivers/media/video/cpia.c
index 8c08b7f1ad23..b7ec9bf45085 100644
--- a/drivers/media/video/cpia.c
+++ b/drivers/media/video/cpia.c
@@ -1397,7 +1397,7 @@ static void destroy_proc_cpia_cam(struct cam_data *cam)
1397 1397
1398static void proc_cpia_create(void) 1398static void proc_cpia_create(void)
1399{ 1399{
1400 cpia_proc_root = create_proc_entry("cpia", S_IFDIR, NULL); 1400 cpia_proc_root = proc_mkdir("cpia", NULL);
1401 1401
1402 if (cpia_proc_root) 1402 if (cpia_proc_root)
1403 cpia_proc_root->owner = THIS_MODULE; 1403 cpia_proc_root->owner = THIS_MODULE;
diff --git a/drivers/media/video/rds.h b/drivers/media/video/rds.h
index 30337d0f1a87..0d30eb744e61 100644
--- a/drivers/media/video/rds.h
+++ b/drivers/media/video/rds.h
@@ -31,7 +31,7 @@
31struct rds_command { 31struct rds_command {
32 unsigned int block_count; 32 unsigned int block_count;
33 int result; 33 int result;
34 unsigned char *buffer; 34 unsigned char __user *buffer;
35 struct file *instance; 35 struct file *instance;
36 poll_table *event_list; 36 poll_table *event_list;
37}; 37};
diff --git a/drivers/media/video/saa6588.c b/drivers/media/video/saa6588.c
index 1a657a70ff43..72b70eb5da1d 100644
--- a/drivers/media/video/saa6588.c
+++ b/drivers/media/video/saa6588.c
@@ -157,7 +157,7 @@ static struct i2c_client client_template;
157 157
158/* ---------------------------------------------------------------------- */ 158/* ---------------------------------------------------------------------- */
159 159
160static int block_to_user_buf(struct saa6588 *s, unsigned char *user_buf) 160static int block_to_user_buf(struct saa6588 *s, unsigned char __user *user_buf)
161{ 161{
162 int i; 162 int i;
163 163
@@ -191,7 +191,7 @@ static void read_from_buf(struct saa6588 *s, struct rds_command *a)
191{ 191{
192 unsigned long flags; 192 unsigned long flags;
193 193
194 unsigned char *buf_ptr = a->buffer; /* This is a user space buffer! */ 194 unsigned char __user *buf_ptr = a->buffer;
195 unsigned int i; 195 unsigned int i;
196 unsigned int rd_blocks; 196 unsigned int rd_blocks;
197 197
diff --git a/drivers/mfd/ucb1x00-ts.c b/drivers/mfd/ucb1x00-ts.c
index a851d65c7cfe..a260f83bcb02 100644
--- a/drivers/mfd/ucb1x00-ts.c
+++ b/drivers/mfd/ucb1x00-ts.c
@@ -48,8 +48,8 @@ struct ucb1x00_ts {
48 u16 x_res; 48 u16 x_res;
49 u16 y_res; 49 u16 y_res;
50 50
51 int restart:1; 51 unsigned int restart:1;
52 int adcsync:1; 52 unsigned int adcsync:1;
53}; 53};
54 54
55static int adcsync; 55static int adcsync;
diff --git a/drivers/mtd/devices/docecc.c b/drivers/mtd/devices/docecc.c
index 9a087c1fb0b7..24f670b5a4f3 100644
--- a/drivers/mtd/devices/docecc.c
+++ b/drivers/mtd/devices/docecc.c
@@ -40,7 +40,7 @@
40#include <linux/mtd/mtd.h> 40#include <linux/mtd/mtd.h>
41#include <linux/mtd/doc2000.h> 41#include <linux/mtd/doc2000.h>
42 42
43#define DEBUG 0 43#define DEBUG_ECC 0
44/* need to undef it (from asm/termbits.h) */ 44/* need to undef it (from asm/termbits.h) */
45#undef B0 45#undef B0
46 46
@@ -249,7 +249,7 @@ eras_dec_rs(dtype Alpha_to[NN + 1], dtype Index_of[NN + 1],
249 lambda[j] ^= Alpha_to[modnn(u + tmp)]; 249 lambda[j] ^= Alpha_to[modnn(u + tmp)];
250 } 250 }
251 } 251 }
252#if DEBUG >= 1 252#if DEBUG_ECC >= 1
253 /* Test code that verifies the erasure locator polynomial just constructed 253 /* Test code that verifies the erasure locator polynomial just constructed
254 Needed only for decoder debugging. */ 254 Needed only for decoder debugging. */
255 255
@@ -276,7 +276,7 @@ eras_dec_rs(dtype Alpha_to[NN + 1], dtype Index_of[NN + 1],
276 count = -1; 276 count = -1;
277 goto finish; 277 goto finish;
278 } 278 }
279#if DEBUG >= 2 279#if DEBUG_ECC >= 2
280 printf("\n Erasure positions as determined by roots of Eras Loc Poly:\n"); 280 printf("\n Erasure positions as determined by roots of Eras Loc Poly:\n");
281 for (i = 0; i < count; i++) 281 for (i = 0; i < count; i++)
282 printf("%d ", loc[i]); 282 printf("%d ", loc[i]);
@@ -409,7 +409,7 @@ eras_dec_rs(dtype Alpha_to[NN + 1], dtype Index_of[NN + 1],
409 den ^= Alpha_to[modnn(lambda[i+1] + i * root[j])]; 409 den ^= Alpha_to[modnn(lambda[i+1] + i * root[j])];
410 } 410 }
411 if (den == 0) { 411 if (den == 0) {
412#if DEBUG >= 1 412#if DEBUG_ECC >= 1
413 printf("\n ERROR: denominator = 0\n"); 413 printf("\n ERROR: denominator = 0\n");
414#endif 414#endif
415 /* Convert to dual- basis */ 415 /* Convert to dual- basis */
diff --git a/drivers/mtd/maps/bast-flash.c b/drivers/mtd/maps/bast-flash.c
index 0c45464e3f7b..0ba0ff7d43b9 100644
--- a/drivers/mtd/maps/bast-flash.c
+++ b/drivers/mtd/maps/bast-flash.c
@@ -39,7 +39,6 @@
39#include <linux/mtd/partitions.h> 39#include <linux/mtd/partitions.h>
40 40
41#include <asm/io.h> 41#include <asm/io.h>
42#include <asm/mach-types.h>
43#include <asm/mach/flash.h> 42#include <asm/mach/flash.h>
44 43
45#include <asm/arch/map.h> 44#include <asm/arch/map.h>
diff --git a/drivers/mtd/maps/ixp2000.c b/drivers/mtd/maps/ixp2000.c
index 3e94b616743d..a9f86c7fbd52 100644
--- a/drivers/mtd/maps/ixp2000.c
+++ b/drivers/mtd/maps/ixp2000.c
@@ -30,7 +30,6 @@
30 30
31#include <asm/io.h> 31#include <asm/io.h>
32#include <asm/hardware.h> 32#include <asm/hardware.h>
33#include <asm/mach-types.h>
34#include <asm/mach/flash.h> 33#include <asm/mach/flash.h>
35 34
36#include <linux/reboot.h> 35#include <linux/reboot.h>
diff --git a/drivers/mtd/maps/ixp4xx.c b/drivers/mtd/maps/ixp4xx.c
index 5afe660aa2c4..3fcc32884074 100644
--- a/drivers/mtd/maps/ixp4xx.c
+++ b/drivers/mtd/maps/ixp4xx.c
@@ -26,7 +26,6 @@
26#include <linux/ioport.h> 26#include <linux/ioport.h>
27#include <linux/device.h> 27#include <linux/device.h>
28#include <asm/io.h> 28#include <asm/io.h>
29#include <asm/mach-types.h>
30#include <asm/mach/flash.h> 29#include <asm/mach/flash.h>
31 30
32#include <linux/reboot.h> 31#include <linux/reboot.h>
@@ -254,6 +253,6 @@ module_init(ixp4xx_flash_init);
254module_exit(ixp4xx_flash_exit); 253module_exit(ixp4xx_flash_exit);
255 254
256MODULE_LICENSE("GPL"); 255MODULE_LICENSE("GPL");
257MODULE_DESCRIPTION("MTD map driver for Intel IXP4xx systems") 256MODULE_DESCRIPTION("MTD map driver for Intel IXP4xx systems");
258MODULE_AUTHOR("Deepak Saxena"); 257MODULE_AUTHOR("Deepak Saxena");
259 258
diff --git a/drivers/mtd/maps/omap_nor.c b/drivers/mtd/maps/omap_nor.c
index 8cc71409a328..b17bca657daf 100644
--- a/drivers/mtd/maps/omap_nor.c
+++ b/drivers/mtd/maps/omap_nor.c
@@ -42,7 +42,6 @@
42 42
43#include <asm/io.h> 43#include <asm/io.h>
44#include <asm/hardware.h> 44#include <asm/hardware.h>
45#include <asm/mach-types.h>
46#include <asm/mach/flash.h> 45#include <asm/mach/flash.h>
47#include <asm/arch/tc.h> 46#include <asm/arch/tc.h>
48 47
diff --git a/drivers/mtd/maps/sa1100-flash.c b/drivers/mtd/maps/sa1100-flash.c
index 52385705da09..8dcaa357b4bb 100644
--- a/drivers/mtd/maps/sa1100-flash.c
+++ b/drivers/mtd/maps/sa1100-flash.c
@@ -21,7 +21,6 @@
21#include <linux/mtd/partitions.h> 21#include <linux/mtd/partitions.h>
22#include <linux/mtd/concat.h> 22#include <linux/mtd/concat.h>
23 23
24#include <asm/mach-types.h>
25#include <asm/io.h> 24#include <asm/io.h>
26#include <asm/sizes.h> 25#include <asm/sizes.h>
27#include <asm/mach/flash.h> 26#include <asm/mach/flash.h>
diff --git a/drivers/mtd/nand/s3c2410.c b/drivers/mtd/nand/s3c2410.c
index 891e3a1b9110..b47ebcb31e0f 100644
--- a/drivers/mtd/nand/s3c2410.c
+++ b/drivers/mtd/nand/s3c2410.c
@@ -58,7 +58,6 @@
58#include <linux/mtd/partitions.h> 58#include <linux/mtd/partitions.h>
59 59
60#include <asm/io.h> 60#include <asm/io.h>
61#include <asm/mach-types.h>
62#include <asm/hardware/clock.h> 61#include <asm/hardware/clock.h>
63 62
64#include <asm/arch/regs-nand.h> 63#include <asm/arch/regs-nand.h>
diff --git a/drivers/net/8390.c b/drivers/net/8390.c
index 6d76f3a99b17..f87027420081 100644
--- a/drivers/net/8390.c
+++ b/drivers/net/8390.c
@@ -1094,7 +1094,7 @@ static void NS8390_trigger_send(struct net_device *dev, unsigned int length,
1094 1094
1095 outb_p(E8390_NODMA+E8390_PAGE0, e8390_base+E8390_CMD); 1095 outb_p(E8390_NODMA+E8390_PAGE0, e8390_base+E8390_CMD);
1096 1096
1097 if (inb_p(e8390_base) & E8390_TRANS) 1097 if (inb_p(e8390_base + E8390_CMD) & E8390_TRANS)
1098 { 1098 {
1099 printk(KERN_WARNING "%s: trigger_send() called with the transmitter busy.\n", 1099 printk(KERN_WARNING "%s: trigger_send() called with the transmitter busy.\n",
1100 dev->name); 1100 dev->name);
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index db8898100325..018b11a7a4ce 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -548,6 +548,14 @@ config SUNGEM
548 Support for the Sun GEM chip, aka Sun GigabitEthernet/P 2.0. See also 548 Support for the Sun GEM chip, aka Sun GigabitEthernet/P 2.0. See also
549 <http://www.sun.com/products-n-solutions/hardware/docs/pdf/806-3985-10.pdf>. 549 <http://www.sun.com/products-n-solutions/hardware/docs/pdf/806-3985-10.pdf>.
550 550
551config CASSINI
552 tristate "Sun Cassini support"
553 depends on NET_ETHERNET && PCI
554 select CRC32
555 help
556 Support for the Sun Cassini chip, aka Sun GigaSwift Ethernet. See also
557 <http://www.sun.com/products-n-solutions/hardware/docs/pdf/817-4341-10.pdf>
558
551config NET_VENDOR_3COM 559config NET_VENDOR_3COM
552 bool "3COM cards" 560 bool "3COM cards"
553 depends on NET_ETHERNET && (ISA || EISA || MCA || PCI) 561 depends on NET_ETHERNET && (ISA || EISA || MCA || PCI)
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 39b17cb4f86e..4c9477cb2127 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -28,6 +28,7 @@ obj-$(CONFIG_SUNQE) += sunqe.o
28obj-$(CONFIG_SUNBMAC) += sunbmac.o 28obj-$(CONFIG_SUNBMAC) += sunbmac.o
29obj-$(CONFIG_MYRI_SBUS) += myri_sbus.o 29obj-$(CONFIG_MYRI_SBUS) += myri_sbus.o
30obj-$(CONFIG_SUNGEM) += sungem.o sungem_phy.o 30obj-$(CONFIG_SUNGEM) += sungem.o sungem_phy.o
31obj-$(CONFIG_CASSINI) += cassini.o
31 32
32obj-$(CONFIG_MACE) += mace.o 33obj-$(CONFIG_MACE) += mace.o
33obj-$(CONFIG_BMAC) += bmac.o 34obj-$(CONFIG_BMAC) += bmac.o
diff --git a/drivers/net/arm/am79c961a.c b/drivers/net/arm/am79c961a.c
index 9b659e3c8d67..c56d86d371a9 100644
--- a/drivers/net/arm/am79c961a.c
+++ b/drivers/net/arm/am79c961a.c
@@ -15,16 +15,13 @@
15 */ 15 */
16#include <linux/kernel.h> 16#include <linux/kernel.h>
17#include <linux/types.h> 17#include <linux/types.h>
18#include <linux/fcntl.h>
19#include <linux/interrupt.h> 18#include <linux/interrupt.h>
20#include <linux/ioport.h> 19#include <linux/ioport.h>
21#include <linux/in.h>
22#include <linux/slab.h> 20#include <linux/slab.h>
23#include <linux/string.h> 21#include <linux/string.h>
24#include <linux/errno.h> 22#include <linux/errno.h>
25#include <linux/netdevice.h> 23#include <linux/netdevice.h>
26#include <linux/etherdevice.h> 24#include <linux/etherdevice.h>
27#include <linux/skbuff.h>
28#include <linux/delay.h> 25#include <linux/delay.h>
29#include <linux/init.h> 26#include <linux/init.h>
30#include <linux/crc32.h> 27#include <linux/crc32.h>
@@ -33,7 +30,6 @@
33#include <asm/system.h> 30#include <asm/system.h>
34#include <asm/irq.h> 31#include <asm/irq.h>
35#include <asm/io.h> 32#include <asm/io.h>
36#include <asm/dma.h>
37 33
38#define TX_BUFFERS 15 34#define TX_BUFFERS 15
39#define RX_BUFFERS 25 35#define RX_BUFFERS 25
@@ -85,7 +81,7 @@ static inline unsigned short read_ireg(u_long base_addr, u_int reg)
85 u_short v; 81 u_short v;
86 __asm__( 82 __asm__(
87 "str%?h %1, [%2] @ NAT_RAP\n\t" 83 "str%?h %1, [%2] @ NAT_RAP\n\t"
88 "str%?h %0, [%2, #8] @ NET_IDP\n\t" 84 "ldr%?h %0, [%2, #8] @ NET_IDP\n\t"
89 : "=r" (v) 85 : "=r" (v)
90 : "r" (reg), "r" (ISAIO_BASE + 0x0464)); 86 : "r" (reg), "r" (ISAIO_BASE + 0x0464));
91 return v; 87 return v;
@@ -288,7 +284,7 @@ static void am79c961_timer(unsigned long data)
288 else if (!lnkstat && carrier) 284 else if (!lnkstat && carrier)
289 netif_carrier_off(dev); 285 netif_carrier_off(dev);
290 286
291 mod_timer(&priv->timer, jiffies + 5*HZ); 287 mod_timer(&priv->timer, jiffies + msecs_to_jiffies(500));
292} 288}
293 289
294/* 290/*
@@ -709,13 +705,9 @@ static int __init am79c961_init(void)
709 goto release; 705 goto release;
710 706
711 am79c961_banner(); 707 am79c961_banner();
712 printk(KERN_INFO "%s: ether address ", dev->name);
713 708
714 /* Retrive and print the ethernet address. */ 709 for (i = 0; i < 6; i++)
715 for (i = 0; i < 6; i++) {
716 dev->dev_addr[i] = inb(dev->base_addr + i * 2) & 0xff; 710 dev->dev_addr[i] = inb(dev->base_addr + i * 2) & 0xff;
717 printk (i == 5 ? "%02x\n" : "%02x:", dev->dev_addr[i]);
718 }
719 711
720 spin_lock_init(&priv->chip_lock); 712 spin_lock_init(&priv->chip_lock);
721 init_timer(&priv->timer); 713 init_timer(&priv->timer);
@@ -736,8 +728,14 @@ static int __init am79c961_init(void)
736#endif 728#endif
737 729
738 ret = register_netdev(dev); 730 ret = register_netdev(dev);
739 if (ret == 0) 731 if (ret == 0) {
732 printk(KERN_INFO "%s: ether address ", dev->name);
733
734 for (i = 0; i < 6; i++)
735 printk (i == 5 ? "%02x\n" : "%02x:", dev->dev_addr[i]);
736
740 return 0; 737 return 0;
738 }
741 739
742release: 740release:
743 release_region(dev->base_addr, 0x18); 741 release_region(dev->base_addr, 0x18);
diff --git a/drivers/net/bmac.c b/drivers/net/bmac.c
index 8dc657fc8afb..60dba4a1ca5c 100644
--- a/drivers/net/bmac.c
+++ b/drivers/net/bmac.c
@@ -218,7 +218,7 @@ void bmwrite(struct net_device *dev, unsigned long reg_offset, unsigned data )
218 218
219 219
220static inline 220static inline
221volatile unsigned short bmread(struct net_device *dev, unsigned long reg_offset ) 221unsigned short bmread(struct net_device *dev, unsigned long reg_offset )
222{ 222{
223 return in_le16((void __iomem *)dev->base_addr + reg_offset); 223 return in_le16((void __iomem *)dev->base_addr + reg_offset);
224} 224}
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 6d00c3de1a83..bf81cd45e4d4 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -2776,7 +2776,7 @@ static u32 bond_glean_dev_ip(struct net_device *dev)
2776 return 0; 2776 return 0;
2777 2777
2778 rcu_read_lock(); 2778 rcu_read_lock();
2779 idev = __in_dev_get(dev); 2779 idev = __in_dev_get_rcu(dev);
2780 if (!idev) 2780 if (!idev)
2781 goto out; 2781 goto out;
2782 2782
diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c
new file mode 100644
index 000000000000..2e617424d3fb
--- /dev/null
+++ b/drivers/net/cassini.c
@@ -0,0 +1,5237 @@
1/* cassini.c: Sun Microsystems Cassini(+) ethernet driver.
2 *
3 * Copyright (C) 2004 Sun Microsystems Inc.
4 * Copyright (C) 2003 Adrian Sun (asun@darksunrising.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License as
8 * published by the Free Software Foundation; either version 2 of the
9 * License, or (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
19 * 02111-1307, USA.
20 *
21 * This driver uses the sungem driver (c) David Miller
22 * (davem@redhat.com) as its basis.
23 *
24 * The cassini chip has a number of features that distinguish it from
25 * the gem chip:
26 * 4 transmit descriptor rings that are used for either QoS (VLAN) or
27 * load balancing (non-VLAN mode)
28 * batching of multiple packets
29 * multiple CPU dispatching
30 * page-based RX descriptor engine with separate completion rings
31 * Gigabit support (GMII and PCS interface)
32 * MIF link up/down detection works
33 *
34 * RX is handled by page sized buffers that are attached as fragments to
35 * the skb. here's what's done:
36 * -- driver allocates pages at a time and keeps reference counts
37 * on them.
38 * -- the upper protocol layers assume that the header is in the skb
39 * itself. as a result, cassini will copy a small amount (64 bytes)
40 * to make them happy.
41 * -- driver appends the rest of the data pages as frags to skbuffs
42 * and increments the reference count
43 * -- on page reclamation, the driver swaps the page with a spare page.
44 * if that page is still in use, it frees its reference to that page,
45 * and allocates a new page for use. otherwise, it just recycles the
 46 *     page.
47 *
48 * NOTE: cassini can parse the header. however, it's not worth it
49 * as long as the network stack requires a header copy.
50 *
51 * TX has 4 queues. currently these queues are used in a round-robin
52 * fashion for load balancing. They can also be used for QoS. for that
53 * to work, however, QoS information needs to be exposed down to the driver
 54 * level so that subqueues get targeted to particular transmit rings.
55 * alternatively, the queues can be configured via use of the all-purpose
56 * ioctl.
57 *
58 * RX DATA: the rx completion ring has all the info, but the rx desc
59 * ring has all of the data. RX can conceivably come in under multiple
60 * interrupts, but the INT# assignment needs to be set up properly by
61 * the BIOS and conveyed to the driver. PCI BIOSes don't know how to do
62 * that. also, the two descriptor rings are designed to distinguish between
63 * encrypted and non-encrypted packets, but we use them for buffering
64 * instead.
65 *
66 * by default, the selective clear mask is set up to process rx packets.
67 */
68
69#include <linux/config.h>
70#include <linux/version.h>
71
72#include <linux/module.h>
73#include <linux/kernel.h>
74#include <linux/types.h>
75#include <linux/compiler.h>
76#include <linux/slab.h>
77#include <linux/delay.h>
78#include <linux/init.h>
79#include <linux/ioport.h>
80#include <linux/pci.h>
81#include <linux/mm.h>
82#include <linux/highmem.h>
83#include <linux/list.h>
84#include <linux/dma-mapping.h>
85
86#include <linux/netdevice.h>
87#include <linux/etherdevice.h>
88#include <linux/skbuff.h>
89#include <linux/ethtool.h>
90#include <linux/crc32.h>
91#include <linux/random.h>
92#include <linux/mii.h>
93#include <linux/ip.h>
94#include <linux/tcp.h>
95
96#include <net/checksum.h>
97
98#include <asm/atomic.h>
99#include <asm/system.h>
100#include <asm/io.h>
101#include <asm/byteorder.h>
102#include <asm/uaccess.h>
103
104#define cas_page_map(x) kmap_atomic((x), KM_SKB_DATA_SOFTIRQ)
105#define cas_page_unmap(x) kunmap_atomic((x), KM_SKB_DATA_SOFTIRQ)
106#define CAS_NCPUS num_online_cpus()
107
108#if defined(CONFIG_CASSINI_NAPI) && defined(HAVE_NETDEV_POLL)
109#define USE_NAPI
110#define cas_skb_release(x) netif_receive_skb(x)
111#else
112#define cas_skb_release(x) netif_rx(x)
113#endif
114
115/* select which firmware to use */
116#define USE_HP_WORKAROUND
117#define HP_WORKAROUND_DEFAULT /* select which firmware to use as default */
118#define CAS_HP_ALT_FIRMWARE cas_prog_null /* alternate firmware */
119
120#include "cassini.h"
121
122#define USE_TX_COMPWB /* use completion writeback registers */
123#define USE_CSMA_CD_PROTO /* standard CSMA/CD */
124#define USE_RX_BLANK /* hw interrupt mitigation */
125#undef USE_ENTROPY_DEV /* don't test for entropy device */
126
127/* NOTE: these aren't useable unless PCI interrupts can be assigned.
128 * also, we need to make cp->lock finer-grained.
129 */
130#undef USE_PCI_INTB
131#undef USE_PCI_INTC
132#undef USE_PCI_INTD
133#undef USE_QOS
134
135#undef USE_VPD_DEBUG /* debug vpd information if defined */
136
137/* rx processing options */
138#define USE_PAGE_ORDER /* specify to allocate large rx pages */
139#define RX_DONT_BATCH 0 /* if 1, don't batch flows */
140#define RX_COPY_ALWAYS 0 /* if 0, use frags */
141#define RX_COPY_MIN 64 /* copy a little to make upper layers happy */
142#undef RX_COUNT_BUFFERS /* define to calculate RX buffer stats */
143
144#define DRV_MODULE_NAME "cassini"
145#define PFX DRV_MODULE_NAME ": "
146#define DRV_MODULE_VERSION "1.4"
147#define DRV_MODULE_RELDATE "1 July 2004"
148
149#define CAS_DEF_MSG_ENABLE \
150 (NETIF_MSG_DRV | \
151 NETIF_MSG_PROBE | \
152 NETIF_MSG_LINK | \
153 NETIF_MSG_TIMER | \
154 NETIF_MSG_IFDOWN | \
155 NETIF_MSG_IFUP | \
156 NETIF_MSG_RX_ERR | \
157 NETIF_MSG_TX_ERR)
158
159/* length of time before we decide the hardware is borked,
160 * and dev->tx_timeout() should be called to fix the problem
161 */
162#define CAS_TX_TIMEOUT (HZ)
163#define CAS_LINK_TIMEOUT (22*HZ/10)
164#define CAS_LINK_FAST_TIMEOUT (1)
165
166/* timeout values for state changing. these specify the number
167 * of 10us delays to be used before giving up.
168 */
169#define STOP_TRIES_PHY 1000
170#define STOP_TRIES 5000
171
172/* specify a minimum frame size to deal with some fifo issues
173 * max mtu == 2 * page size - ethernet header - 64 - swivel =
174 * 2 * page_size - 0x50
175 */
176#define CAS_MIN_FRAME 97
177#define CAS_1000MB_MIN_FRAME 255
178#define CAS_MIN_MTU 60
179#define CAS_MAX_MTU min(((cp->page_size << 1) - 0x50), 9000)
180
181#if 1
182/*
183 * Eliminate these and use separate atomic counters for each, to
184 * avoid a race condition.
185 */
186#else
187#define CAS_RESET_MTU 1
188#define CAS_RESET_ALL 2
189#define CAS_RESET_SPARE 3
190#endif
191
192static char version[] __devinitdata =
193 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
194
195MODULE_AUTHOR("Adrian Sun (asun@darksunrising.com)");
196MODULE_DESCRIPTION("Sun Cassini(+) ethernet driver");
197MODULE_LICENSE("GPL");
198MODULE_PARM(cassini_debug, "i");
199MODULE_PARM_DESC(cassini_debug, "Cassini bitmapped debugging message enable value");
200MODULE_PARM(link_mode, "i");
201MODULE_PARM_DESC(link_mode, "default link mode");
202
203/*
 204 * Workaround for a PCS bug in which the link goes down due to the chip
205 * being confused and never showing a link status of "up."
206 */
207#define DEFAULT_LINKDOWN_TIMEOUT 5
208/*
209 * Value in seconds, for user input.
210 */
211static int linkdown_timeout = DEFAULT_LINKDOWN_TIMEOUT;
212MODULE_PARM(linkdown_timeout, "i");
213MODULE_PARM_DESC(linkdown_timeout,
214"min reset interval in sec. for PCS linkdown issue; disabled if not positive");
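/* Example module load (assumed invocation; the parameter names are those
 * declared above, and link_mode indexes the link_modes[] table below):
 *
 *	modprobe cassini cassini_debug=-1 link_mode=0 linkdown_timeout=5
 */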
215
216/*
217 * value in 'ticks' (units used by jiffies). Set when we init the
 218 * module because 'HZ' is actually a function call on some flavors of
219 * Linux. This will default to DEFAULT_LINKDOWN_TIMEOUT * HZ.
220 */
221static int link_transition_timeout;
222
223
224static int cassini_debug = -1; /* -1 == use CAS_DEF_MSG_ENABLE as value */
225static int link_mode;
226
227static u16 link_modes[] __devinitdata = {
228 BMCR_ANENABLE, /* 0 : autoneg */
229 0, /* 1 : 10bt half duplex */
230 BMCR_SPEED100, /* 2 : 100bt half duplex */
231 BMCR_FULLDPLX, /* 3 : 10bt full duplex */
232 BMCR_SPEED100|BMCR_FULLDPLX, /* 4 : 100bt full duplex */
233 CAS_BMCR_SPEED1000|BMCR_FULLDPLX /* 5 : 1000bt full duplex */
234};
235
236static struct pci_device_id cas_pci_tbl[] __devinitdata = {
237 { PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_CASSINI,
238 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
239 { PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SATURN,
240 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
241 { 0, }
242};
243
244MODULE_DEVICE_TABLE(pci, cas_pci_tbl);
245
246static void cas_set_link_modes(struct cas *cp);
247
248static inline void cas_lock_tx(struct cas *cp)
249{
250 int i;
251
252 for (i = 0; i < N_TX_RINGS; i++)
253 spin_lock(&cp->tx_lock[i]);
254}
255
256static inline void cas_lock_all(struct cas *cp)
257{
258 spin_lock_irq(&cp->lock);
259 cas_lock_tx(cp);
260}
261
262/* WTZ: QA was finding deadlock problems with the previous
263 * versions after long test runs with multiple cards per machine.
264 * See if replacing cas_lock_all with safer versions helps. The
265 * symptoms QA is reporting match those we'd expect if interrupts
266 * aren't being properly restored, and we fixed a previous deadlock
267 * with similar symptoms by using save/restore versions in other
268 * places.
269 */
270#define cas_lock_all_save(cp, flags) \
271do { \
272 struct cas *xxxcp = (cp); \
273 spin_lock_irqsave(&xxxcp->lock, flags); \
274 cas_lock_tx(xxxcp); \
275} while (0)
276
277static inline void cas_unlock_tx(struct cas *cp)
278{
279 int i;
280
281 for (i = N_TX_RINGS; i > 0; i--)
282 spin_unlock(&cp->tx_lock[i - 1]);
283}
284
285static inline void cas_unlock_all(struct cas *cp)
286{
287 cas_unlock_tx(cp);
288 spin_unlock_irq(&cp->lock);
289}
290
291#define cas_unlock_all_restore(cp, flags) \
292do { \
293 struct cas *xxxcp = (cp); \
294 cas_unlock_tx(xxxcp); \
295 spin_unlock_irqrestore(&xxxcp->lock, flags); \
296} while (0)
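/* Typical usage of the save/restore pair above (sketch only):
 *
 *	unsigned long flags;
 *
 *	cas_lock_all_save(cp, flags);
 *	... touch shared state ...
 *	cas_unlock_all_restore(cp, flags);
 */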
297
298static void cas_disable_irq(struct cas *cp, const int ring)
299{
300 /* Make sure we won't get any more interrupts */
301 if (ring == 0) {
302 writel(0xFFFFFFFF, cp->regs + REG_INTR_MASK);
303 return;
304 }
305
306 /* disable completion interrupts and selectively mask */
307 if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
308 switch (ring) {
309#if defined (USE_PCI_INTB) || defined(USE_PCI_INTC) || defined(USE_PCI_INTD)
310#ifdef USE_PCI_INTB
311 case 1:
312#endif
313#ifdef USE_PCI_INTC
314 case 2:
315#endif
316#ifdef USE_PCI_INTD
317 case 3:
318#endif
319 writel(INTRN_MASK_CLEAR_ALL | INTRN_MASK_RX_EN,
320 cp->regs + REG_PLUS_INTRN_MASK(ring));
321 break;
322#endif
323 default:
324 writel(INTRN_MASK_CLEAR_ALL, cp->regs +
325 REG_PLUS_INTRN_MASK(ring));
326 break;
327 }
328 }
329}
330
331static inline void cas_mask_intr(struct cas *cp)
332{
333 int i;
334
335 for (i = 0; i < N_RX_COMP_RINGS; i++)
336 cas_disable_irq(cp, i);
337}
338
339static void cas_enable_irq(struct cas *cp, const int ring)
340{
341 if (ring == 0) { /* all but TX_DONE */
342 writel(INTR_TX_DONE, cp->regs + REG_INTR_MASK);
343 return;
344 }
345
346 if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
347 switch (ring) {
348#if defined (USE_PCI_INTB) || defined(USE_PCI_INTC) || defined(USE_PCI_INTD)
349#ifdef USE_PCI_INTB
350 case 1:
351#endif
352#ifdef USE_PCI_INTC
353 case 2:
354#endif
355#ifdef USE_PCI_INTD
356 case 3:
357#endif
358 writel(INTRN_MASK_RX_EN, cp->regs +
359 REG_PLUS_INTRN_MASK(ring));
360 break;
361#endif
362 default:
363 break;
364 }
365 }
366}
367
368static inline void cas_unmask_intr(struct cas *cp)
369{
370 int i;
371
372 for (i = 0; i < N_RX_COMP_RINGS; i++)
373 cas_enable_irq(cp, i);
374}
375
376static inline void cas_entropy_gather(struct cas *cp)
377{
378#ifdef USE_ENTROPY_DEV
379 if ((cp->cas_flags & CAS_FLAG_ENTROPY_DEV) == 0)
380 return;
381
382 batch_entropy_store(readl(cp->regs + REG_ENTROPY_IV),
383 readl(cp->regs + REG_ENTROPY_IV),
384 sizeof(uint64_t)*8);
385#endif
386}
387
388static inline void cas_entropy_reset(struct cas *cp)
389{
390#ifdef USE_ENTROPY_DEV
391 if ((cp->cas_flags & CAS_FLAG_ENTROPY_DEV) == 0)
392 return;
393
394 writel(BIM_LOCAL_DEV_PAD | BIM_LOCAL_DEV_PROM | BIM_LOCAL_DEV_EXT,
395 cp->regs + REG_BIM_LOCAL_DEV_EN);
396 writeb(ENTROPY_RESET_STC_MODE, cp->regs + REG_ENTROPY_RESET);
397 writeb(0x55, cp->regs + REG_ENTROPY_RAND_REG);
398
399 /* if we read back 0x0, we don't have an entropy device */
400 if (readb(cp->regs + REG_ENTROPY_RAND_REG) == 0)
401 cp->cas_flags &= ~CAS_FLAG_ENTROPY_DEV;
402#endif
403}
404
405/* access to the phy. the following assumes that we've initialized the MIF to
406 * be in frame rather than bit-bang mode
407 */
408static u16 cas_phy_read(struct cas *cp, int reg)
409{
410 u32 cmd;
411 int limit = STOP_TRIES_PHY;
412
413 cmd = MIF_FRAME_ST | MIF_FRAME_OP_READ;
414 cmd |= CAS_BASE(MIF_FRAME_PHY_ADDR, cp->phy_addr);
415 cmd |= CAS_BASE(MIF_FRAME_REG_ADDR, reg);
416 cmd |= MIF_FRAME_TURN_AROUND_MSB;
417 writel(cmd, cp->regs + REG_MIF_FRAME);
418
419 /* poll for completion */
420 while (limit-- > 0) {
421 udelay(10);
422 cmd = readl(cp->regs + REG_MIF_FRAME);
423 if (cmd & MIF_FRAME_TURN_AROUND_LSB)
424 return (cmd & MIF_FRAME_DATA_MASK);
425 }
426 return 0xFFFF; /* -1 */
427}
428
429static int cas_phy_write(struct cas *cp, int reg, u16 val)
430{
431 int limit = STOP_TRIES_PHY;
432 u32 cmd;
433
434 cmd = MIF_FRAME_ST | MIF_FRAME_OP_WRITE;
435 cmd |= CAS_BASE(MIF_FRAME_PHY_ADDR, cp->phy_addr);
436 cmd |= CAS_BASE(MIF_FRAME_REG_ADDR, reg);
437 cmd |= MIF_FRAME_TURN_AROUND_MSB;
438 cmd |= val & MIF_FRAME_DATA_MASK;
439 writel(cmd, cp->regs + REG_MIF_FRAME);
440
441 /* poll for completion */
442 while (limit-- > 0) {
443 udelay(10);
444 cmd = readl(cp->regs + REG_MIF_FRAME);
445 if (cmd & MIF_FRAME_TURN_AROUND_LSB)
446 return 0;
447 }
448 return -1;
449}
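/* e.g. (sketch): poll the standard MII status/control registers with
 *
 *	u16 bmsr = cas_phy_read(cp, MII_BMSR);
 *	cas_phy_write(cp, MII_BMCR, BMCR_RESET);
 *
 * cas_phy_read() returns 0xFFFF on a polling timeout and cas_phy_write()
 * returns -1; most callers below ignore the error cases.
 */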
450
451static void cas_phy_powerup(struct cas *cp)
452{
453 u16 ctl = cas_phy_read(cp, MII_BMCR);
454
455 if ((ctl & BMCR_PDOWN) == 0)
456 return;
457 ctl &= ~BMCR_PDOWN;
458 cas_phy_write(cp, MII_BMCR, ctl);
459}
460
461static void cas_phy_powerdown(struct cas *cp)
462{
463 u16 ctl = cas_phy_read(cp, MII_BMCR);
464
465 if (ctl & BMCR_PDOWN)
466 return;
467 ctl |= BMCR_PDOWN;
468 cas_phy_write(cp, MII_BMCR, ctl);
469}
470
471/* cp->lock held. note: the last put_page will free the buffer */
472static int cas_page_free(struct cas *cp, cas_page_t *page)
473{
474 pci_unmap_page(cp->pdev, page->dma_addr, cp->page_size,
475 PCI_DMA_FROMDEVICE);
476 __free_pages(page->buffer, cp->page_order);
477 kfree(page);
478 return 0;
479}
480
481#ifdef RX_COUNT_BUFFERS
482#define RX_USED_ADD(x, y) ((x)->used += (y))
483#define RX_USED_SET(x, y) ((x)->used = (y))
484#else
485#define RX_USED_ADD(x, y)
486#define RX_USED_SET(x, y)
487#endif
488
489/* local page allocation routines for the receive buffers. jumbo pages
490 * require at least 8K contiguous and 8K aligned buffers.
491 */
492static cas_page_t *cas_page_alloc(struct cas *cp, const int flags)
493{
494 cas_page_t *page;
495
496 page = kmalloc(sizeof(cas_page_t), flags);
497 if (!page)
498 return NULL;
499
500 INIT_LIST_HEAD(&page->list);
501 RX_USED_SET(page, 0);
502 page->buffer = alloc_pages(flags, cp->page_order);
503 if (!page->buffer)
504 goto page_err;
505 page->dma_addr = pci_map_page(cp->pdev, page->buffer, 0,
506 cp->page_size, PCI_DMA_FROMDEVICE);
507 return page;
508
509page_err:
510 kfree(page);
511 return NULL;
512}
513
514/* initialize spare pool of rx buffers, but allocate during the open */
515static void cas_spare_init(struct cas *cp)
516{
517 spin_lock(&cp->rx_inuse_lock);
518 INIT_LIST_HEAD(&cp->rx_inuse_list);
519 spin_unlock(&cp->rx_inuse_lock);
520
521 spin_lock(&cp->rx_spare_lock);
522 INIT_LIST_HEAD(&cp->rx_spare_list);
523 cp->rx_spares_needed = RX_SPARE_COUNT;
524 spin_unlock(&cp->rx_spare_lock);
525}
526
527/* used on close. free all the spare buffers. */
528static void cas_spare_free(struct cas *cp)
529{
530 struct list_head list, *elem, *tmp;
531
532 /* free spare buffers */
533 INIT_LIST_HEAD(&list);
534 spin_lock(&cp->rx_spare_lock);
535 list_splice(&cp->rx_spare_list, &list);
536 INIT_LIST_HEAD(&cp->rx_spare_list);
537 spin_unlock(&cp->rx_spare_lock);
538 list_for_each_safe(elem, tmp, &list) {
539 cas_page_free(cp, list_entry(elem, cas_page_t, list));
540 }
541
542 INIT_LIST_HEAD(&list);
543#if 1
544 /*
 545 * Looks like Adrian had protected this with a different
 546 * lock than the one used everywhere else to manipulate this list.
547 */
548 spin_lock(&cp->rx_inuse_lock);
549 list_splice(&cp->rx_inuse_list, &list);
550 INIT_LIST_HEAD(&cp->rx_inuse_list);
551 spin_unlock(&cp->rx_inuse_lock);
552#else
553 spin_lock(&cp->rx_spare_lock);
554 list_splice(&cp->rx_inuse_list, &list);
555 INIT_LIST_HEAD(&cp->rx_inuse_list);
556 spin_unlock(&cp->rx_spare_lock);
557#endif
558 list_for_each_safe(elem, tmp, &list) {
559 cas_page_free(cp, list_entry(elem, cas_page_t, list));
560 }
561}
562
563/* replenish spares if needed */
564static void cas_spare_recover(struct cas *cp, const int flags)
565{
566 struct list_head list, *elem, *tmp;
567 int needed, i;
568
569 /* check inuse list. if we don't need any more free buffers,
 570 * just free them
571 */
572
573 /* make a local copy of the list */
574 INIT_LIST_HEAD(&list);
575 spin_lock(&cp->rx_inuse_lock);
576 list_splice(&cp->rx_inuse_list, &list);
577 INIT_LIST_HEAD(&cp->rx_inuse_list);
578 spin_unlock(&cp->rx_inuse_lock);
579
580 list_for_each_safe(elem, tmp, &list) {
581 cas_page_t *page = list_entry(elem, cas_page_t, list);
582
583 if (page_count(page->buffer) > 1)
584 continue;
585
586 list_del(elem);
587 spin_lock(&cp->rx_spare_lock);
588 if (cp->rx_spares_needed > 0) {
589 list_add(elem, &cp->rx_spare_list);
590 cp->rx_spares_needed--;
591 spin_unlock(&cp->rx_spare_lock);
592 } else {
593 spin_unlock(&cp->rx_spare_lock);
594 cas_page_free(cp, page);
595 }
596 }
597
598 /* put any inuse buffers back on the list */
599 if (!list_empty(&list)) {
600 spin_lock(&cp->rx_inuse_lock);
601 list_splice(&list, &cp->rx_inuse_list);
602 spin_unlock(&cp->rx_inuse_lock);
603 }
604
605 spin_lock(&cp->rx_spare_lock);
606 needed = cp->rx_spares_needed;
607 spin_unlock(&cp->rx_spare_lock);
608 if (!needed)
609 return;
610
611 /* we still need spares, so try to allocate some */
612 INIT_LIST_HEAD(&list);
613 i = 0;
614 while (i < needed) {
615 cas_page_t *spare = cas_page_alloc(cp, flags);
616 if (!spare)
617 break;
618 list_add(&spare->list, &list);
619 i++;
620 }
621
622 spin_lock(&cp->rx_spare_lock);
623 list_splice(&list, &cp->rx_spare_list);
624 cp->rx_spares_needed -= i;
625 spin_unlock(&cp->rx_spare_lock);
626}
627
628/* pull a page from the list. */
629static cas_page_t *cas_page_dequeue(struct cas *cp)
630{
631 struct list_head *entry;
632 int recover;
633
634 spin_lock(&cp->rx_spare_lock);
635 if (list_empty(&cp->rx_spare_list)) {
636 /* try to do a quick recovery */
637 spin_unlock(&cp->rx_spare_lock);
638 cas_spare_recover(cp, GFP_ATOMIC);
639 spin_lock(&cp->rx_spare_lock);
640 if (list_empty(&cp->rx_spare_list)) {
641 if (netif_msg_rx_err(cp))
642 printk(KERN_ERR "%s: no spare buffers "
643 "available.\n", cp->dev->name);
644 spin_unlock(&cp->rx_spare_lock);
645 return NULL;
646 }
647 }
648
649 entry = cp->rx_spare_list.next;
650 list_del(entry);
651 recover = ++cp->rx_spares_needed;
652 spin_unlock(&cp->rx_spare_lock);
653
654 /* trigger the timer to do the recovery */
655 if ((recover & (RX_SPARE_RECOVER_VAL - 1)) == 0) {
656#if 1
657 atomic_inc(&cp->reset_task_pending);
658 atomic_inc(&cp->reset_task_pending_spare);
659 schedule_work(&cp->reset_task);
660#else
661 atomic_set(&cp->reset_task_pending, CAS_RESET_SPARE);
662 schedule_work(&cp->reset_task);
663#endif
664 }
665 return list_entry(entry, cas_page_t, list);
666}
667
668
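/* enable or disable MIF polling of the PHY's BMSR register. with polling
 * enabled the MIF raises an interrupt (serviced in cas_mif_interrupt) when
 * the link-status/autoneg bits change, instead of the driver polling the
 * PHY over MDIO itself.
 */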
669static void cas_mif_poll(struct cas *cp, const int enable)
670{
671 u32 cfg;
672
673 cfg = readl(cp->regs + REG_MIF_CFG);
674 cfg &= (MIF_CFG_MDIO_0 | MIF_CFG_MDIO_1);
675
676 if (cp->phy_type & CAS_PHY_MII_MDIO1)
677 cfg |= MIF_CFG_PHY_SELECT;
678
679 /* poll and interrupt on link status change. */
680 if (enable) {
681 cfg |= MIF_CFG_POLL_EN;
682 cfg |= CAS_BASE(MIF_CFG_POLL_REG, MII_BMSR);
683 cfg |= CAS_BASE(MIF_CFG_POLL_PHY, cp->phy_addr);
684 }
685 writel((enable) ? ~(BMSR_LSTATUS | BMSR_ANEGCOMPLETE) : 0xFFFF,
686 cp->regs + REG_MIF_MASK);
687 writel(cfg, cp->regs + REG_MIF_CFG);
688}
689
690/* Must be invoked under cp->lock */
691static void cas_begin_auto_negotiation(struct cas *cp, struct ethtool_cmd *ep)
692{
693 u16 ctl;
694#if 1
695 int lcntl;
696 int changed = 0;
697 int oldstate = cp->lstate;
698 int link_was_not_down = !(oldstate == link_down);
699#endif
700 /* Setup link parameters */
701 if (!ep)
702 goto start_aneg;
703 lcntl = cp->link_cntl;
704 if (ep->autoneg == AUTONEG_ENABLE)
705 cp->link_cntl = BMCR_ANENABLE;
706 else {
707 cp->link_cntl = 0;
708 if (ep->speed == SPEED_100)
709 cp->link_cntl |= BMCR_SPEED100;
710 else if (ep->speed == SPEED_1000)
711 cp->link_cntl |= CAS_BMCR_SPEED1000;
712 if (ep->duplex == DUPLEX_FULL)
713 cp->link_cntl |= BMCR_FULLDPLX;
714 }
715#if 1
716 changed = (lcntl != cp->link_cntl);
717#endif
718start_aneg:
719 if (cp->lstate == link_up) {
720 printk(KERN_INFO "%s: PCS link down.\n",
721 cp->dev->name);
722 } else {
723 if (changed) {
724 printk(KERN_INFO "%s: link configuration changed\n",
725 cp->dev->name);
726 }
727 }
728 cp->lstate = link_down;
729 cp->link_transition = LINK_TRANSITION_LINK_DOWN;
730 if (!cp->hw_running)
731 return;
732#if 1
733 /*
734 * WTZ: If the old state was link_up, we turn off the carrier
735 * to replicate everything we do elsewhere on a link-down
 736 * event when we were already in a link-up state.
737 */
738 if (oldstate == link_up)
739 netif_carrier_off(cp->dev);
740 if (changed && link_was_not_down) {
741 /*
742 * WTZ: This branch will simply schedule a full reset after
743 * we explicitly changed link modes in an ioctl. See if this
744 * fixes the link-problems we were having for forced mode.
745 */
746 atomic_inc(&cp->reset_task_pending);
747 atomic_inc(&cp->reset_task_pending_all);
748 schedule_work(&cp->reset_task);
749 cp->timer_ticks = 0;
750 mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT);
751 return;
752 }
753#endif
754 if (cp->phy_type & CAS_PHY_SERDES) {
755 u32 val = readl(cp->regs + REG_PCS_MII_CTRL);
756
757 if (cp->link_cntl & BMCR_ANENABLE) {
758 val |= (PCS_MII_RESTART_AUTONEG | PCS_MII_AUTONEG_EN);
759 cp->lstate = link_aneg;
760 } else {
761 if (cp->link_cntl & BMCR_FULLDPLX)
762 val |= PCS_MII_CTRL_DUPLEX;
763 val &= ~PCS_MII_AUTONEG_EN;
764 cp->lstate = link_force_ok;
765 }
766 cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
767 writel(val, cp->regs + REG_PCS_MII_CTRL);
768
769 } else {
770 cas_mif_poll(cp, 0);
771 ctl = cas_phy_read(cp, MII_BMCR);
772 ctl &= ~(BMCR_FULLDPLX | BMCR_SPEED100 |
773 CAS_BMCR_SPEED1000 | BMCR_ANENABLE);
774 ctl |= cp->link_cntl;
775 if (ctl & BMCR_ANENABLE) {
776 ctl |= BMCR_ANRESTART;
777 cp->lstate = link_aneg;
778 } else {
779 cp->lstate = link_force_ok;
780 }
781 cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
782 cas_phy_write(cp, MII_BMCR, ctl);
783 cas_mif_poll(cp, 1);
784 }
785
786 cp->timer_ticks = 0;
787 mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT);
788}
789
790/* Must be invoked under cp->lock. */
791static int cas_reset_mii_phy(struct cas *cp)
792{
793 int limit = STOP_TRIES_PHY;
794 u16 val;
795
796 cas_phy_write(cp, MII_BMCR, BMCR_RESET);
797 udelay(100);
798 while (limit--) {
799 val = cas_phy_read(cp, MII_BMCR);
800 if ((val & BMCR_RESET) == 0)
801 break;
802 udelay(10);
803 }
804 return (limit <= 0);
805}
806
807static void cas_saturn_firmware_load(struct cas *cp)
808{
809 cas_saturn_patch_t *patch = cas_saturn_patch;
810
811 cas_phy_powerdown(cp);
812
813 /* expanded memory access mode */
814 cas_phy_write(cp, DP83065_MII_MEM, 0x0);
815
816 /* pointer configuration for new firmware */
817 cas_phy_write(cp, DP83065_MII_REGE, 0x8ff9);
818 cas_phy_write(cp, DP83065_MII_REGD, 0xbd);
819 cas_phy_write(cp, DP83065_MII_REGE, 0x8ffa);
820 cas_phy_write(cp, DP83065_MII_REGD, 0x82);
821 cas_phy_write(cp, DP83065_MII_REGE, 0x8ffb);
822 cas_phy_write(cp, DP83065_MII_REGD, 0x0);
823 cas_phy_write(cp, DP83065_MII_REGE, 0x8ffc);
824 cas_phy_write(cp, DP83065_MII_REGD, 0x39);
825
826 /* download new firmware */
827 cas_phy_write(cp, DP83065_MII_MEM, 0x1);
828 cas_phy_write(cp, DP83065_MII_REGE, patch->addr);
829 while (patch->addr) {
830 cas_phy_write(cp, DP83065_MII_REGD, patch->val);
831 patch++;
832 }
833
834 /* enable firmware */
835 cas_phy_write(cp, DP83065_MII_REGE, 0x8ff8);
836 cas_phy_write(cp, DP83065_MII_REGD, 0x1);
837}
838
839
840/* phy initialization */
841static void cas_phy_init(struct cas *cp)
842{
843 u16 val;
844
845 /* if we're in MII/GMII mode, set up phy */
846 if (CAS_PHY_MII(cp->phy_type)) {
847 writel(PCS_DATAPATH_MODE_MII,
848 cp->regs + REG_PCS_DATAPATH_MODE);
849
850 cas_mif_poll(cp, 0);
851 cas_reset_mii_phy(cp); /* take out of isolate mode */
852
853 if (PHY_LUCENT_B0 == cp->phy_id) {
854 /* workaround link up/down issue with lucent */
855 cas_phy_write(cp, LUCENT_MII_REG, 0x8000);
856 cas_phy_write(cp, MII_BMCR, 0x00f1);
857 cas_phy_write(cp, LUCENT_MII_REG, 0x0);
858
859 } else if (PHY_BROADCOM_B0 == (cp->phy_id & 0xFFFFFFFC)) {
860 /* workarounds for broadcom phy */
861 cas_phy_write(cp, BROADCOM_MII_REG8, 0x0C20);
862 cas_phy_write(cp, BROADCOM_MII_REG7, 0x0012);
863 cas_phy_write(cp, BROADCOM_MII_REG5, 0x1804);
864 cas_phy_write(cp, BROADCOM_MII_REG7, 0x0013);
865 cas_phy_write(cp, BROADCOM_MII_REG5, 0x1204);
866 cas_phy_write(cp, BROADCOM_MII_REG7, 0x8006);
867 cas_phy_write(cp, BROADCOM_MII_REG5, 0x0132);
868 cas_phy_write(cp, BROADCOM_MII_REG7, 0x8006);
869 cas_phy_write(cp, BROADCOM_MII_REG5, 0x0232);
870 cas_phy_write(cp, BROADCOM_MII_REG7, 0x201F);
871 cas_phy_write(cp, BROADCOM_MII_REG5, 0x0A20);
872
873 } else if (PHY_BROADCOM_5411 == cp->phy_id) {
874 val = cas_phy_read(cp, BROADCOM_MII_REG4);
875 val = cas_phy_read(cp, BROADCOM_MII_REG4);
876 if (val & 0x0080) {
877 /* link workaround */
878 cas_phy_write(cp, BROADCOM_MII_REG4,
879 val & ~0x0080);
880 }
881
882 } else if (cp->cas_flags & CAS_FLAG_SATURN) {
883 writel((cp->phy_type & CAS_PHY_MII_MDIO0) ?
884 SATURN_PCFG_FSI : 0x0,
885 cp->regs + REG_SATURN_PCFG);
886
 887 /* load firmware to address a 10Mbps auto-negotiation
888 * issue. NOTE: this will need to be changed if the
889 * default firmware gets fixed.
890 */
891 if (PHY_NS_DP83065 == cp->phy_id) {
892 cas_saturn_firmware_load(cp);
893 }
894 cas_phy_powerup(cp);
895 }
896
897 /* advertise capabilities */
898 val = cas_phy_read(cp, MII_BMCR);
899 val &= ~BMCR_ANENABLE;
900 cas_phy_write(cp, MII_BMCR, val);
901 udelay(10);
902
903 cas_phy_write(cp, MII_ADVERTISE,
904 cas_phy_read(cp, MII_ADVERTISE) |
905 (ADVERTISE_10HALF | ADVERTISE_10FULL |
906 ADVERTISE_100HALF | ADVERTISE_100FULL |
907 CAS_ADVERTISE_PAUSE |
908 CAS_ADVERTISE_ASYM_PAUSE));
909
910 if (cp->cas_flags & CAS_FLAG_1000MB_CAP) {
911 /* make sure that we don't advertise half
912 * duplex to avoid a chip issue
913 */
914 val = cas_phy_read(cp, CAS_MII_1000_CTRL);
915 val &= ~CAS_ADVERTISE_1000HALF;
916 val |= CAS_ADVERTISE_1000FULL;
917 cas_phy_write(cp, CAS_MII_1000_CTRL, val);
918 }
919
920 } else {
921 /* reset pcs for serdes */
922 u32 val;
923 int limit;
924
925 writel(PCS_DATAPATH_MODE_SERDES,
926 cp->regs + REG_PCS_DATAPATH_MODE);
927
928 /* enable serdes pins on saturn */
929 if (cp->cas_flags & CAS_FLAG_SATURN)
930 writel(0, cp->regs + REG_SATURN_PCFG);
931
932 /* Reset PCS unit. */
933 val = readl(cp->regs + REG_PCS_MII_CTRL);
934 val |= PCS_MII_RESET;
935 writel(val, cp->regs + REG_PCS_MII_CTRL);
936
937 limit = STOP_TRIES;
938 while (limit-- > 0) {
939 udelay(10);
940 if ((readl(cp->regs + REG_PCS_MII_CTRL) &
941 PCS_MII_RESET) == 0)
942 break;
943 }
944 if (limit <= 0)
945 printk(KERN_WARNING "%s: PCS reset bit would not "
946 "clear [%08x].\n", cp->dev->name,
947 readl(cp->regs + REG_PCS_STATE_MACHINE));
948
949 /* Make sure PCS is disabled while changing advertisement
950 * configuration.
951 */
952 writel(0x0, cp->regs + REG_PCS_CFG);
953
954 /* Advertise all capabilities except half-duplex. */
955 val = readl(cp->regs + REG_PCS_MII_ADVERT);
956 val &= ~PCS_MII_ADVERT_HD;
957 val |= (PCS_MII_ADVERT_FD | PCS_MII_ADVERT_SYM_PAUSE |
958 PCS_MII_ADVERT_ASYM_PAUSE);
959 writel(val, cp->regs + REG_PCS_MII_ADVERT);
960
961 /* enable PCS */
962 writel(PCS_CFG_EN, cp->regs + REG_PCS_CFG);
963
964 /* pcs workaround: enable sync detect */
965 writel(PCS_SERDES_CTRL_SYNCD_EN,
966 cp->regs + REG_PCS_SERDES_CTRL);
967 }
968}
969
970
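/* check the PCS (serdes) link state and update lstate/carrier to match.
 * returns nonzero when the caller should schedule a chip reset as part of
 * the link-down workarounds described below.
 */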
971static int cas_pcs_link_check(struct cas *cp)
972{
973 u32 stat, state_machine;
974 int retval = 0;
975
976 /* The link status bit latches on zero, so you must
977 * read it twice in such a case to see a transition
978 * to the link being up.
979 */
980 stat = readl(cp->regs + REG_PCS_MII_STATUS);
981 if ((stat & PCS_MII_STATUS_LINK_STATUS) == 0)
982 stat = readl(cp->regs + REG_PCS_MII_STATUS);
983
984 /* The remote-fault indication is only valid
985 * when autoneg has completed.
986 */
987 if ((stat & (PCS_MII_STATUS_AUTONEG_COMP |
988 PCS_MII_STATUS_REMOTE_FAULT)) ==
989 (PCS_MII_STATUS_AUTONEG_COMP | PCS_MII_STATUS_REMOTE_FAULT)) {
990 if (netif_msg_link(cp))
991 printk(KERN_INFO "%s: PCS RemoteFault\n",
992 cp->dev->name);
993 }
994
995 /* work around link detection issue by querying the PCS state
996 * machine directly.
997 */
998 state_machine = readl(cp->regs + REG_PCS_STATE_MACHINE);
999 if ((state_machine & PCS_SM_LINK_STATE_MASK) != SM_LINK_STATE_UP) {
1000 stat &= ~PCS_MII_STATUS_LINK_STATUS;
1001 } else if (state_machine & PCS_SM_WORD_SYNC_STATE_MASK) {
1002 stat |= PCS_MII_STATUS_LINK_STATUS;
1003 }
1004
1005 if (stat & PCS_MII_STATUS_LINK_STATUS) {
1006 if (cp->lstate != link_up) {
1007 if (cp->opened) {
1008 cp->lstate = link_up;
1009 cp->link_transition = LINK_TRANSITION_LINK_UP;
1010
1011 cas_set_link_modes(cp);
1012 netif_carrier_on(cp->dev);
1013 }
1014 }
1015 } else if (cp->lstate == link_up) {
1016 cp->lstate = link_down;
1017 if (link_transition_timeout != 0 &&
1018 cp->link_transition != LINK_TRANSITION_REQUESTED_RESET &&
1019 !cp->link_transition_jiffies_valid) {
1020 /*
1021 * force a reset, as a workaround for the
1022 * link-failure problem. May want to move this to a
1023 * point a bit earlier in the sequence. If we had
1024 * generated a reset a short time ago, we'll wait for
1025 * the link timer to check the status until a
 1026 * timer expires (link_transition_jiffies_valid is
1027 * true when the timer is running.) Instead of using
1028 * a system timer, we just do a check whenever the
1029 * link timer is running - this clears the flag after
1030 * a suitable delay.
1031 */
1032 retval = 1;
1033 cp->link_transition = LINK_TRANSITION_REQUESTED_RESET;
1034 cp->link_transition_jiffies = jiffies;
1035 cp->link_transition_jiffies_valid = 1;
1036 } else {
1037 cp->link_transition = LINK_TRANSITION_ON_FAILURE;
1038 }
1039 netif_carrier_off(cp->dev);
1040 if (cp->opened && netif_msg_link(cp)) {
1041 printk(KERN_INFO "%s: PCS link down.\n",
1042 cp->dev->name);
1043 }
1044
1045 /* Cassini only: if you force a mode, there can be
1046 * sync problems on link down. to fix that, the following
1047 * things need to be checked:
1048 * 1) read serialink state register
1049 * 2) read pcs status register to verify link down.
1050 * 3) if link down and serial link == 0x03, then you need
1051 * to global reset the chip.
1052 */
1053 if ((cp->cas_flags & CAS_FLAG_REG_PLUS) == 0) {
1054 /* should check to see if we're in a forced mode */
1055 stat = readl(cp->regs + REG_PCS_SERDES_STATE);
1056 if (stat == 0x03)
1057 return 1;
1058 }
1059 } else if (cp->lstate == link_down) {
1060 if (link_transition_timeout != 0 &&
1061 cp->link_transition != LINK_TRANSITION_REQUESTED_RESET &&
1062 !cp->link_transition_jiffies_valid) {
1063 /* force a reset, as a workaround for the
1064 * link-failure problem. May want to move
1065 * this to a point a bit earlier in the
1066 * sequence.
1067 */
1068 retval = 1;
1069 cp->link_transition = LINK_TRANSITION_REQUESTED_RESET;
1070 cp->link_transition_jiffies = jiffies;
1071 cp->link_transition_jiffies_valid = 1;
1072 } else {
1073 cp->link_transition = LINK_TRANSITION_STILL_FAILED;
1074 }
1075 }
1076
1077 return retval;
1078}
1079
1080static int cas_pcs_interrupt(struct net_device *dev,
1081 struct cas *cp, u32 status)
1082{
1083 u32 stat = readl(cp->regs + REG_PCS_INTR_STATUS);
1084
1085 if ((stat & PCS_INTR_STATUS_LINK_CHANGE) == 0)
1086 return 0;
1087 return cas_pcs_link_check(cp);
1088}
1089
1090static int cas_txmac_interrupt(struct net_device *dev,
1091 struct cas *cp, u32 status)
1092{
1093 u32 txmac_stat = readl(cp->regs + REG_MAC_TX_STATUS);
1094
1095 if (!txmac_stat)
1096 return 0;
1097
1098 if (netif_msg_intr(cp))
1099 printk(KERN_DEBUG "%s: txmac interrupt, txmac_stat: 0x%x\n",
1100 cp->dev->name, txmac_stat);
1101
1102 /* Defer timer expiration is quite normal,
1103 * don't even log the event.
1104 */
1105 if ((txmac_stat & MAC_TX_DEFER_TIMER) &&
1106 !(txmac_stat & ~MAC_TX_DEFER_TIMER))
1107 return 0;
1108
1109 spin_lock(&cp->stat_lock[0]);
1110 if (txmac_stat & MAC_TX_UNDERRUN) {
1111 printk(KERN_ERR "%s: TX MAC xmit underrun.\n",
1112 dev->name);
1113 cp->net_stats[0].tx_fifo_errors++;
1114 }
1115
1116 if (txmac_stat & MAC_TX_MAX_PACKET_ERR) {
1117 printk(KERN_ERR "%s: TX MAC max packet size error.\n",
1118 dev->name);
1119 cp->net_stats[0].tx_errors++;
1120 }
1121
1122 /* The rest are all cases of one of the 16-bit TX
1123 * counters expiring.
1124 */
1125 if (txmac_stat & MAC_TX_COLL_NORMAL)
1126 cp->net_stats[0].collisions += 0x10000;
1127
1128 if (txmac_stat & MAC_TX_COLL_EXCESS) {
1129 cp->net_stats[0].tx_aborted_errors += 0x10000;
1130 cp->net_stats[0].collisions += 0x10000;
1131 }
1132
1133 if (txmac_stat & MAC_TX_COLL_LATE) {
1134 cp->net_stats[0].tx_aborted_errors += 0x10000;
1135 cp->net_stats[0].collisions += 0x10000;
1136 }
1137 spin_unlock(&cp->stat_lock[0]);
1138
1139 /* We do not keep track of MAC_TX_COLL_FIRST and
1140 * MAC_TX_PEAK_ATTEMPTS events.
1141 */
1142 return 0;
1143}
1144
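/* load a header-parser (HP) program into the HP instruction RAM. the
 * firmware table is terminated by an entry with a NULL note field; each
 * entry is packed into the HI/MID/LOW instruction-RAM data registers at
 * successive addresses.
 */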
1145static void cas_load_firmware(struct cas *cp, cas_hp_inst_t *firmware)
1146{
1147 cas_hp_inst_t *inst;
1148 u32 val;
1149 int i;
1150
1151 i = 0;
1152 while ((inst = firmware) && inst->note) {
1153 writel(i, cp->regs + REG_HP_INSTR_RAM_ADDR);
1154
1155 val = CAS_BASE(HP_INSTR_RAM_HI_VAL, inst->val);
1156 val |= CAS_BASE(HP_INSTR_RAM_HI_MASK, inst->mask);
1157 writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_HI);
1158
1159 val = CAS_BASE(HP_INSTR_RAM_MID_OUTARG, inst->outarg >> 10);
1160 val |= CAS_BASE(HP_INSTR_RAM_MID_OUTOP, inst->outop);
1161 val |= CAS_BASE(HP_INSTR_RAM_MID_FNEXT, inst->fnext);
1162 val |= CAS_BASE(HP_INSTR_RAM_MID_FOFF, inst->foff);
1163 val |= CAS_BASE(HP_INSTR_RAM_MID_SNEXT, inst->snext);
1164 val |= CAS_BASE(HP_INSTR_RAM_MID_SOFF, inst->soff);
1165 val |= CAS_BASE(HP_INSTR_RAM_MID_OP, inst->op);
1166 writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_MID);
1167
1168 val = CAS_BASE(HP_INSTR_RAM_LOW_OUTMASK, inst->outmask);
1169 val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTSHIFT, inst->outshift);
1170 val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTEN, inst->outenab);
1171 val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTARG, inst->outarg);
1172 writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_LOW);
1173 ++firmware;
1174 ++i;
1175 }
1176}
1177
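/* program the RX DMA engine: descriptor and completion ring base
 * addresses and kick registers, pause thresholds, interrupt blanking,
 * the page-size/MTU-stride layout, and (optionally) the header parser.
 */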
1178static void cas_init_rx_dma(struct cas *cp)
1179{
1180 u64 desc_dma = cp->block_dvma;
1181 u32 val;
1182 int i, size;
1183
1184 /* rx free descriptors */
1185 val = CAS_BASE(RX_CFG_SWIVEL, RX_SWIVEL_OFF_VAL);
1186 val |= CAS_BASE(RX_CFG_DESC_RING, RX_DESC_RINGN_INDEX(0));
1187 val |= CAS_BASE(RX_CFG_COMP_RING, RX_COMP_RINGN_INDEX(0));
1188 if ((N_RX_DESC_RINGS > 1) &&
1189 (cp->cas_flags & CAS_FLAG_REG_PLUS)) /* do desc 2 */
1190 val |= CAS_BASE(RX_CFG_DESC_RING1, RX_DESC_RINGN_INDEX(1));
1191 writel(val, cp->regs + REG_RX_CFG);
1192
1193 val = (unsigned long) cp->init_rxds[0] -
1194 (unsigned long) cp->init_block;
1195 writel((desc_dma + val) >> 32, cp->regs + REG_RX_DB_HI);
1196 writel((desc_dma + val) & 0xffffffff, cp->regs + REG_RX_DB_LOW);
1197 writel(RX_DESC_RINGN_SIZE(0) - 4, cp->regs + REG_RX_KICK);
1198
1199 if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
1200 /* rx desc 2 is for IPSEC packets. however,
 1201 * we don't use it for that purpose.
1202 */
1203 val = (unsigned long) cp->init_rxds[1] -
1204 (unsigned long) cp->init_block;
1205 writel((desc_dma + val) >> 32, cp->regs + REG_PLUS_RX_DB1_HI);
1206 writel((desc_dma + val) & 0xffffffff, cp->regs +
1207 REG_PLUS_RX_DB1_LOW);
1208 writel(RX_DESC_RINGN_SIZE(1) - 4, cp->regs +
1209 REG_PLUS_RX_KICK1);
1210 }
1211
1212 /* rx completion registers */
1213 val = (unsigned long) cp->init_rxcs[0] -
1214 (unsigned long) cp->init_block;
1215 writel((desc_dma + val) >> 32, cp->regs + REG_RX_CB_HI);
1216 writel((desc_dma + val) & 0xffffffff, cp->regs + REG_RX_CB_LOW);
1217
1218 if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
1219 /* rx comp 2-4 */
1220 for (i = 1; i < MAX_RX_COMP_RINGS; i++) {
1221 val = (unsigned long) cp->init_rxcs[i] -
1222 (unsigned long) cp->init_block;
1223 writel((desc_dma + val) >> 32, cp->regs +
1224 REG_PLUS_RX_CBN_HI(i));
1225 writel((desc_dma + val) & 0xffffffff, cp->regs +
1226 REG_PLUS_RX_CBN_LOW(i));
1227 }
1228 }
1229
1230 /* read selective clear regs to prevent spurious interrupts
1231 * on reset because complete == kick.
 1232 * the selective clear mask is set up to prevent interrupts on resets
1233 */
1234 readl(cp->regs + REG_INTR_STATUS_ALIAS);
1235 writel(INTR_RX_DONE | INTR_RX_BUF_UNAVAIL, cp->regs + REG_ALIAS_CLEAR);
1236 if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
1237 for (i = 1; i < N_RX_COMP_RINGS; i++)
1238 readl(cp->regs + REG_PLUS_INTRN_STATUS_ALIAS(i));
1239
1240 /* 2 is different from 3 and 4 */
1241 if (N_RX_COMP_RINGS > 1)
1242 writel(INTR_RX_DONE_ALT | INTR_RX_BUF_UNAVAIL_1,
1243 cp->regs + REG_PLUS_ALIASN_CLEAR(1));
1244
1245 for (i = 2; i < N_RX_COMP_RINGS; i++)
1246 writel(INTR_RX_DONE_ALT,
1247 cp->regs + REG_PLUS_ALIASN_CLEAR(i));
1248 }
1249
1250 /* set up pause thresholds */
1251 val = CAS_BASE(RX_PAUSE_THRESH_OFF,
1252 cp->rx_pause_off / RX_PAUSE_THRESH_QUANTUM);
1253 val |= CAS_BASE(RX_PAUSE_THRESH_ON,
1254 cp->rx_pause_on / RX_PAUSE_THRESH_QUANTUM);
1255 writel(val, cp->regs + REG_RX_PAUSE_THRESH);
1256
1257 /* zero out dma reassembly buffers */
1258 for (i = 0; i < 64; i++) {
1259 writel(i, cp->regs + REG_RX_TABLE_ADDR);
1260 writel(0x0, cp->regs + REG_RX_TABLE_DATA_LOW);
1261 writel(0x0, cp->regs + REG_RX_TABLE_DATA_MID);
1262 writel(0x0, cp->regs + REG_RX_TABLE_DATA_HI);
1263 }
1264
1265 /* make sure address register is 0 for normal operation */
1266 writel(0x0, cp->regs + REG_RX_CTRL_FIFO_ADDR);
1267 writel(0x0, cp->regs + REG_RX_IPP_FIFO_ADDR);
1268
1269 /* interrupt mitigation */
1270#ifdef USE_RX_BLANK
1271 val = CAS_BASE(RX_BLANK_INTR_TIME, RX_BLANK_INTR_TIME_VAL);
1272 val |= CAS_BASE(RX_BLANK_INTR_PKT, RX_BLANK_INTR_PKT_VAL);
1273 writel(val, cp->regs + REG_RX_BLANK);
1274#else
1275 writel(0x0, cp->regs + REG_RX_BLANK);
1276#endif
1277
1278 /* interrupt generation as a function of low water marks for
1279 * free desc and completion entries. these are used to trigger
1280 * housekeeping for rx descs. we don't use the free interrupt
1281 * as it's not very useful
1282 */
1283 /* val = CAS_BASE(RX_AE_THRESH_FREE, RX_AE_FREEN_VAL(0)); */
1284 val = CAS_BASE(RX_AE_THRESH_COMP, RX_AE_COMP_VAL);
1285 writel(val, cp->regs + REG_RX_AE_THRESH);
1286 if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
1287 val = CAS_BASE(RX_AE1_THRESH_FREE, RX_AE_FREEN_VAL(1));
1288 writel(val, cp->regs + REG_PLUS_RX_AE1_THRESH);
1289 }
1290
1291 /* Random early detect registers. useful for congestion avoidance.
1292 * this should be tunable.
1293 */
1294 writel(0x0, cp->regs + REG_RX_RED);
1295
1296 /* receive page sizes. default == 2K (0x800) */
1297 val = 0;
1298 if (cp->page_size == 0x1000)
1299 val = 0x1;
1300 else if (cp->page_size == 0x2000)
1301 val = 0x2;
1302 else if (cp->page_size == 0x4000)
1303 val = 0x3;
1304
1305 /* round mtu + offset. constrain to page size. */
1306 size = cp->dev->mtu + 64;
1307 if (size > cp->page_size)
1308 size = cp->page_size;
1309
1310 if (size <= 0x400)
1311 i = 0x0;
1312 else if (size <= 0x800)
1313 i = 0x1;
1314 else if (size <= 0x1000)
1315 i = 0x2;
1316 else
1317 i = 0x3;
1318
1319 cp->mtu_stride = 1 << (i + 10);
1320 val = CAS_BASE(RX_PAGE_SIZE, val);
1321 val |= CAS_BASE(RX_PAGE_SIZE_MTU_STRIDE, i);
1322 val |= CAS_BASE(RX_PAGE_SIZE_MTU_COUNT, cp->page_size >> (i + 10));
1323 val |= CAS_BASE(RX_PAGE_SIZE_MTU_OFF, 0x1);
1324 writel(val, cp->regs + REG_RX_PAGE_SIZE);
1325
1326 /* enable the header parser if desired */
1327 if (CAS_HP_FIRMWARE == cas_prog_null)
1328 return;
1329
1330 val = CAS_BASE(HP_CFG_NUM_CPU, CAS_NCPUS > 63 ? 0 : CAS_NCPUS);
1331 val |= HP_CFG_PARSE_EN | HP_CFG_SYN_INC_MASK;
1332 val |= CAS_BASE(HP_CFG_TCP_THRESH, HP_TCP_THRESH_VAL);
1333 writel(val, cp->regs + REG_HP_CFG);
1334}
1335
1336static inline void cas_rxc_init(struct cas_rx_comp *rxc)
1337{
1338 memset(rxc, 0, sizeof(*rxc));
1339 rxc->word4 = cpu_to_le64(RX_COMP4_ZERO);
1340}
1341
1342/* NOTE: we use the ENC RX DESC ring for spares. the rx_page[0,1]
1343 * flipping is protected by the fact that the chip will not
1344 * hand back the same page index while it's being processed.
1345 */
1346static inline cas_page_t *cas_page_spare(struct cas *cp, const int index)
1347{
1348 cas_page_t *page = cp->rx_pages[1][index];
1349 cas_page_t *new;
1350
1351 if (page_count(page->buffer) == 1)
1352 return page;
1353
1354 new = cas_page_dequeue(cp);
1355 if (new) {
1356 spin_lock(&cp->rx_inuse_lock);
1357 list_add(&page->list, &cp->rx_inuse_list);
1358 spin_unlock(&cp->rx_inuse_lock);
1359 }
1360 return new;
1361}
1362
1363/* this needs to be changed if we actually use the ENC RX DESC ring */
1364static cas_page_t *cas_page_swap(struct cas *cp, const int ring,
1365 const int index)
1366{
1367 cas_page_t **page0 = cp->rx_pages[0];
1368 cas_page_t **page1 = cp->rx_pages[1];
1369
1370 /* swap if buffer is in use */
1371 if (page_count(page0[index]->buffer) > 1) {
1372 cas_page_t *new = cas_page_spare(cp, index);
1373 if (new) {
1374 page1[index] = page0[index];
1375 page0[index] = new;
1376 }
1377 }
1378 RX_USED_SET(page0[index], 0);
1379 return page0[index];
1380}
1381
1382static void cas_clean_rxds(struct cas *cp)
1383{
1384 /* only clean ring 0 as ring 1 is used for spare buffers */
1385 struct cas_rx_desc *rxd = cp->init_rxds[0];
1386 int i, size;
1387
1388 /* release all rx flows */
1389 for (i = 0; i < N_RX_FLOWS; i++) {
1390 struct sk_buff *skb;
1391 while ((skb = __skb_dequeue(&cp->rx_flows[i]))) {
1392 cas_skb_release(skb);
1393 }
1394 }
1395
1396 /* initialize descriptors */
1397 size = RX_DESC_RINGN_SIZE(0);
1398 for (i = 0; i < size; i++) {
1399 cas_page_t *page = cas_page_swap(cp, 0, i);
1400 rxd[i].buffer = cpu_to_le64(page->dma_addr);
1401 rxd[i].index = cpu_to_le64(CAS_BASE(RX_INDEX_NUM, i) |
1402 CAS_BASE(RX_INDEX_RING, 0));
1403 }
1404
1405 cp->rx_old[0] = RX_DESC_RINGN_SIZE(0) - 4;
1406 cp->rx_last[0] = 0;
1407 cp->cas_flags &= ~CAS_FLAG_RXD_POST(0);
1408}
1409
1410static void cas_clean_rxcs(struct cas *cp)
1411{
1412 int i, j;
1413
1414 /* take ownership of rx comp descriptors */
1415 memset(cp->rx_cur, 0, sizeof(*cp->rx_cur)*N_RX_COMP_RINGS);
1416 memset(cp->rx_new, 0, sizeof(*cp->rx_new)*N_RX_COMP_RINGS);
1417 for (i = 0; i < N_RX_COMP_RINGS; i++) {
1418 struct cas_rx_comp *rxc = cp->init_rxcs[i];
1419 for (j = 0; j < RX_COMP_RINGN_SIZE(i); j++) {
1420 cas_rxc_init(rxc + j);
1421 }
1422 }
1423}
1424
1425#if 0
1426/* When we get a RX fifo overflow, the RX unit is probably hung
1427 * so we do the following.
1428 *
1429 * If any part of the reset goes wrong, we return 1 and that causes the
1430 * whole chip to be reset.
1431 */
1432static int cas_rxmac_reset(struct cas *cp)
1433{
1434 struct net_device *dev = cp->dev;
1435 int limit;
1436 u32 val;
1437
1438 /* First, reset MAC RX. */
1439 writel(cp->mac_rx_cfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
1440 for (limit = 0; limit < STOP_TRIES; limit++) {
1441 if (!(readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_EN))
1442 break;
1443 udelay(10);
1444 }
1445 if (limit == STOP_TRIES) {
1446 printk(KERN_ERR "%s: RX MAC will not disable, resetting whole "
1447 "chip.\n", dev->name);
1448 return 1;
1449 }
1450
1451 /* Second, disable RX DMA. */
1452 writel(0, cp->regs + REG_RX_CFG);
1453 for (limit = 0; limit < STOP_TRIES; limit++) {
1454 if (!(readl(cp->regs + REG_RX_CFG) & RX_CFG_DMA_EN))
1455 break;
1456 udelay(10);
1457 }
1458 if (limit == STOP_TRIES) {
1459 printk(KERN_ERR "%s: RX DMA will not disable, resetting whole "
1460 "chip.\n", dev->name);
1461 return 1;
1462 }
1463
1464 mdelay(5);
1465
1466 /* Execute RX reset command. */
1467 writel(SW_RESET_RX, cp->regs + REG_SW_RESET);
1468 for (limit = 0; limit < STOP_TRIES; limit++) {
1469 if (!(readl(cp->regs + REG_SW_RESET) & SW_RESET_RX))
1470 break;
1471 udelay(10);
1472 }
1473 if (limit == STOP_TRIES) {
1474 printk(KERN_ERR "%s: RX reset command will not execute, "
1475 "resetting whole chip.\n", dev->name);
1476 return 1;
1477 }
1478
1479 /* reset driver rx state */
1480 cas_clean_rxds(cp);
1481 cas_clean_rxcs(cp);
1482
1483 /* Now, reprogram the rest of RX unit. */
1484 cas_init_rx_dma(cp);
1485
1486 /* re-enable */
1487 val = readl(cp->regs + REG_RX_CFG);
1488 writel(val | RX_CFG_DMA_EN, cp->regs + REG_RX_CFG);
1489 writel(MAC_RX_FRAME_RECV, cp->regs + REG_MAC_RX_MASK);
1490 val = readl(cp->regs + REG_MAC_RX_CFG);
1491 writel(val | MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
1492 return 0;
1493}
1494#endif
1495
1496static int cas_rxmac_interrupt(struct net_device *dev, struct cas *cp,
1497 u32 status)
1498{
1499 u32 stat = readl(cp->regs + REG_MAC_RX_STATUS);
1500
1501 if (!stat)
1502 return 0;
1503
1504 if (netif_msg_intr(cp))
1505 printk(KERN_DEBUG "%s: rxmac interrupt, stat: 0x%x\n",
1506 cp->dev->name, stat);
1507
1508 /* these are all rollovers */
1509 spin_lock(&cp->stat_lock[0]);
1510 if (stat & MAC_RX_ALIGN_ERR)
1511 cp->net_stats[0].rx_frame_errors += 0x10000;
1512
1513 if (stat & MAC_RX_CRC_ERR)
1514 cp->net_stats[0].rx_crc_errors += 0x10000;
1515
1516 if (stat & MAC_RX_LEN_ERR)
1517 cp->net_stats[0].rx_length_errors += 0x10000;
1518
1519 if (stat & MAC_RX_OVERFLOW) {
1520 cp->net_stats[0].rx_over_errors++;
1521 cp->net_stats[0].rx_fifo_errors++;
1522 }
1523
1524 /* We do not track MAC_RX_FRAME_COUNT and MAC_RX_VIOL_ERR
1525 * events.
1526 */
1527 spin_unlock(&cp->stat_lock[0]);
1528 return 0;
1529}
1530
1531static int cas_mac_interrupt(struct net_device *dev, struct cas *cp,
1532 u32 status)
1533{
1534 u32 stat = readl(cp->regs + REG_MAC_CTRL_STATUS);
1535
1536 if (!stat)
1537 return 0;
1538
1539 if (netif_msg_intr(cp))
1540 printk(KERN_DEBUG "%s: mac interrupt, stat: 0x%x\n",
1541 cp->dev->name, stat);
1542
1543 /* This interrupt is just for pause frame and pause
1544 * tracking. It is useful for diagnostics and debug
1545 * but probably by default we will mask these events.
1546 */
1547 if (stat & MAC_CTRL_PAUSE_STATE)
1548 cp->pause_entered++;
1549
1550 if (stat & MAC_CTRL_PAUSE_RECEIVED)
1551 cp->pause_last_time_recvd = (stat >> 16);
1552
1553 return 0;
1554}
1555
1556
1557/* Must be invoked under cp->lock. */
1558static inline int cas_mdio_link_not_up(struct cas *cp)
1559{
1560 u16 val;
1561
1562 switch (cp->lstate) {
1563 case link_force_ret:
1564 if (netif_msg_link(cp))
1565 printk(KERN_INFO "%s: Autoneg failed again, keeping"
1566 " forced mode\n", cp->dev->name);
1567 cas_phy_write(cp, MII_BMCR, cp->link_fcntl);
1568 cp->timer_ticks = 5;
1569 cp->lstate = link_force_ok;
1570 cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
1571 break;
1572
1573 case link_aneg:
1574 val = cas_phy_read(cp, MII_BMCR);
1575
1576 /* Try forced modes. we try things in the following order:
1577 * 1000 full -> 100 full/half -> 10 half
1578 */
1579 val &= ~(BMCR_ANRESTART | BMCR_ANENABLE);
1580 val |= BMCR_FULLDPLX;
1581 val |= (cp->cas_flags & CAS_FLAG_1000MB_CAP) ?
1582 CAS_BMCR_SPEED1000 : BMCR_SPEED100;
1583 cas_phy_write(cp, MII_BMCR, val);
1584 cp->timer_ticks = 5;
1585 cp->lstate = link_force_try;
1586 cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
1587 break;
1588
1589 case link_force_try:
1590 /* Downgrade from 1000 to 100 to 10 Mbps if necessary. */
1591 val = cas_phy_read(cp, MII_BMCR);
1592 cp->timer_ticks = 5;
1593 if (val & CAS_BMCR_SPEED1000) { /* gigabit */
1594 val &= ~CAS_BMCR_SPEED1000;
1595 val |= (BMCR_SPEED100 | BMCR_FULLDPLX);
1596 cas_phy_write(cp, MII_BMCR, val);
1597 break;
1598 }
1599
1600 if (val & BMCR_SPEED100) {
1601 if (val & BMCR_FULLDPLX) /* fd failed */
1602 val &= ~BMCR_FULLDPLX;
1603 else { /* 100Mbps failed */
1604 val &= ~BMCR_SPEED100;
1605 }
1606 cas_phy_write(cp, MII_BMCR, val);
1607 break;
1608 }
1609 default:
1610 break;
1611 }
1612 return 0;
1613}
1614
1615
1616/* must be invoked with cp->lock held */
1617static int cas_mii_link_check(struct cas *cp, const u16 bmsr)
1618{
1619 int restart;
1620
1621 if (bmsr & BMSR_LSTATUS) {
1622 /* Ok, here we got a link. If we had it due to a forced
1623 * fallback, and we were configured for autoneg, we
1624 * retry a short autoneg pass. If you know your hub is
1625 * broken, use ethtool ;)
1626 */
1627 if ((cp->lstate == link_force_try) &&
1628 (cp->link_cntl & BMCR_ANENABLE)) {
1629 cp->lstate = link_force_ret;
1630 cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
1631 cas_mif_poll(cp, 0);
1632 cp->link_fcntl = cas_phy_read(cp, MII_BMCR);
1633 cp->timer_ticks = 5;
1634 if (cp->opened && netif_msg_link(cp))
1635 printk(KERN_INFO "%s: Got link after fallback, retrying"
1636 " autoneg once...\n", cp->dev->name);
1637 cas_phy_write(cp, MII_BMCR,
1638 cp->link_fcntl | BMCR_ANENABLE |
1639 BMCR_ANRESTART);
1640 cas_mif_poll(cp, 1);
1641
1642 } else if (cp->lstate != link_up) {
1643 cp->lstate = link_up;
1644 cp->link_transition = LINK_TRANSITION_LINK_UP;
1645
1646 if (cp->opened) {
1647 cas_set_link_modes(cp);
1648 netif_carrier_on(cp->dev);
1649 }
1650 }
1651 return 0;
1652 }
1653
1654 /* link not up. if the link was previously up, we restart the
1655 * whole process
1656 */
1657 restart = 0;
1658 if (cp->lstate == link_up) {
1659 cp->lstate = link_down;
1660 cp->link_transition = LINK_TRANSITION_LINK_DOWN;
1661
1662 netif_carrier_off(cp->dev);
1663 if (cp->opened && netif_msg_link(cp))
1664 printk(KERN_INFO "%s: Link down\n",
1665 cp->dev->name);
1666 restart = 1;
1667
1668 } else if (++cp->timer_ticks > 10)
1669 cas_mdio_link_not_up(cp);
1670
1671 return restart;
1672}
1673
1674static int cas_mif_interrupt(struct net_device *dev, struct cas *cp,
1675 u32 status)
1676{
1677 u32 stat = readl(cp->regs + REG_MIF_STATUS);
1678 u16 bmsr;
1679
1680 /* check for a link change */
1681 if (CAS_VAL(MIF_STATUS_POLL_STATUS, stat) == 0)
1682 return 0;
1683
1684 bmsr = CAS_VAL(MIF_STATUS_POLL_DATA, stat);
1685 return cas_mii_link_check(cp, bmsr);
1686}
1687
1688static int cas_pci_interrupt(struct net_device *dev, struct cas *cp,
1689 u32 status)
1690{
1691 u32 stat = readl(cp->regs + REG_PCI_ERR_STATUS);
1692
1693 if (!stat)
1694 return 0;
1695
1696 printk(KERN_ERR "%s: PCI error [%04x:%04x] ", dev->name, stat,
1697 readl(cp->regs + REG_BIM_DIAG));
1698
1699 /* cassini+ has this reserved */
1700 if ((stat & PCI_ERR_BADACK) &&
1701 ((cp->cas_flags & CAS_FLAG_REG_PLUS) == 0))
1702 printk("<No ACK64# during ABS64 cycle> ");
1703
1704 if (stat & PCI_ERR_DTRTO)
1705 printk("<Delayed transaction timeout> ");
1706 if (stat & PCI_ERR_OTHER)
1707 printk("<other> ");
1708 if (stat & PCI_ERR_BIM_DMA_WRITE)
1709 printk("<BIM DMA 0 write req> ");
1710 if (stat & PCI_ERR_BIM_DMA_READ)
1711 printk("<BIM DMA 0 read req> ");
1712 printk("\n");
1713
1714 if (stat & PCI_ERR_OTHER) {
1715 u16 cfg;
1716
1717 /* Interrogate PCI config space for the
1718 * true cause.
1719 */
1720 pci_read_config_word(cp->pdev, PCI_STATUS, &cfg);
1721 printk(KERN_ERR "%s: Read PCI cfg space status [%04x]\n",
1722 dev->name, cfg);
1723 if (cfg & PCI_STATUS_PARITY)
1724 printk(KERN_ERR "%s: PCI parity error detected.\n",
1725 dev->name);
1726 if (cfg & PCI_STATUS_SIG_TARGET_ABORT)
1727 printk(KERN_ERR "%s: PCI target abort.\n",
1728 dev->name);
1729 if (cfg & PCI_STATUS_REC_TARGET_ABORT)
1730 printk(KERN_ERR "%s: PCI master acks target abort.\n",
1731 dev->name);
1732 if (cfg & PCI_STATUS_REC_MASTER_ABORT)
1733 printk(KERN_ERR "%s: PCI master abort.\n", dev->name);
1734 if (cfg & PCI_STATUS_SIG_SYSTEM_ERROR)
1735 printk(KERN_ERR "%s: PCI system error SERR#.\n",
1736 dev->name);
1737 if (cfg & PCI_STATUS_DETECTED_PARITY)
1738 printk(KERN_ERR "%s: PCI parity error.\n",
1739 dev->name);
1740
1741 /* Write the error bits back to clear them. */
1742 cfg &= (PCI_STATUS_PARITY |
1743 PCI_STATUS_SIG_TARGET_ABORT |
1744 PCI_STATUS_REC_TARGET_ABORT |
1745 PCI_STATUS_REC_MASTER_ABORT |
1746 PCI_STATUS_SIG_SYSTEM_ERROR |
1747 PCI_STATUS_DETECTED_PARITY);
1748 pci_write_config_word(cp->pdev, PCI_STATUS, cfg);
1749 }
1750
1751 /* For all PCI errors, we should reset the chip. */
1752 return 1;
1753}
1754
1755/* All non-normal interrupt conditions get serviced here.
1756 * Returns non-zero if we should just exit the interrupt
 1757 * handler right now (i.e. if we reset the card, which invalidates
1758 * all of the other original irq status bits).
1759 */
1760static int cas_abnormal_irq(struct net_device *dev, struct cas *cp,
1761 u32 status)
1762{
1763 if (status & INTR_RX_TAG_ERROR) {
1764 /* corrupt RX tag framing */
1765 if (netif_msg_rx_err(cp))
1766 printk(KERN_DEBUG "%s: corrupt rx tag framing\n",
1767 cp->dev->name);
1768 spin_lock(&cp->stat_lock[0]);
1769 cp->net_stats[0].rx_errors++;
1770 spin_unlock(&cp->stat_lock[0]);
1771 goto do_reset;
1772 }
1773
1774 if (status & INTR_RX_LEN_MISMATCH) {
1775 /* length mismatch. */
1776 if (netif_msg_rx_err(cp))
1777 printk(KERN_DEBUG "%s: length mismatch for rx frame\n",
1778 cp->dev->name);
1779 spin_lock(&cp->stat_lock[0]);
1780 cp->net_stats[0].rx_errors++;
1781 spin_unlock(&cp->stat_lock[0]);
1782 goto do_reset;
1783 }
1784
1785 if (status & INTR_PCS_STATUS) {
1786 if (cas_pcs_interrupt(dev, cp, status))
1787 goto do_reset;
1788 }
1789
1790 if (status & INTR_TX_MAC_STATUS) {
1791 if (cas_txmac_interrupt(dev, cp, status))
1792 goto do_reset;
1793 }
1794
1795 if (status & INTR_RX_MAC_STATUS) {
1796 if (cas_rxmac_interrupt(dev, cp, status))
1797 goto do_reset;
1798 }
1799
1800 if (status & INTR_MAC_CTRL_STATUS) {
1801 if (cas_mac_interrupt(dev, cp, status))
1802 goto do_reset;
1803 }
1804
1805 if (status & INTR_MIF_STATUS) {
1806 if (cas_mif_interrupt(dev, cp, status))
1807 goto do_reset;
1808 }
1809
1810 if (status & INTR_PCI_ERROR_STATUS) {
1811 if (cas_pci_interrupt(dev, cp, status))
1812 goto do_reset;
1813 }
1814 return 0;
1815
1816do_reset:
1817#if 1
1818 atomic_inc(&cp->reset_task_pending);
1819 atomic_inc(&cp->reset_task_pending_all);
 1820 printk(KERN_ERR "%s: reset called in cas_abnormal_irq [0x%x]\n",
1821 dev->name, status);
1822 schedule_work(&cp->reset_task);
1823#else
1824 atomic_set(&cp->reset_task_pending, CAS_RESET_ALL);
1825 printk(KERN_ERR "reset called in cas_abnormal_irq\n");
1826 schedule_work(&cp->reset_task);
1827#endif
1828 return 1;
1829}
1830
1831/* NOTE: CAS_TABORT returns 1 or 2 so that it can be used when
1832 * determining whether to do a netif_stop/wakeup
1833 */
1834#define CAS_TABORT(x) (((x)->cas_flags & CAS_FLAG_TARGET_ABORT) ? 2 : 1)
1835#define CAS_ROUND_PAGE(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)
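/* number of bytes to split off into a tiny buffer when a TX buffer of
 * 'len' bytes at 'addr' would end within TX_TARGET_ABORT_LEN bytes of a
 * page boundary (only on chips with CAS_FLAG_TARGET_ABORT set); 0 means
 * no split is needed.
 */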
1836static inline int cas_calc_tabort(struct cas *cp, const unsigned long addr,
1837 const int len)
1838{
1839 unsigned long off = addr + len;
1840
1841 if (CAS_TABORT(cp) == 1)
1842 return 0;
1843 if ((CAS_ROUND_PAGE(off) - off) > TX_TARGET_ABORT_LEN)
1844 return 0;
1845 return TX_TARGET_ABORT_LEN;
1846}
1847
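/* reclaim completed TX descriptors on one ring, up to 'limit': unmap the
 * DMA buffers, free the skbs, bump per-ring stats and wake the queue when
 * enough descriptors are free again.
 */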
1848static inline void cas_tx_ringN(struct cas *cp, int ring, int limit)
1849{
1850 struct cas_tx_desc *txds;
1851 struct sk_buff **skbs;
1852 struct net_device *dev = cp->dev;
1853 int entry, count;
1854
1855 spin_lock(&cp->tx_lock[ring]);
1856 txds = cp->init_txds[ring];
1857 skbs = cp->tx_skbs[ring];
1858 entry = cp->tx_old[ring];
1859
1860 count = TX_BUFF_COUNT(ring, entry, limit);
1861 while (entry != limit) {
1862 struct sk_buff *skb = skbs[entry];
1863 dma_addr_t daddr;
1864 u32 dlen;
1865 int frag;
1866
1867 if (!skb) {
1868 /* this should never occur */
1869 entry = TX_DESC_NEXT(ring, entry);
1870 continue;
1871 }
1872
1873 /* however, we might get only a partial skb release. */
 1874 count -= skb_shinfo(skb)->nr_frags +
 1875 cp->tx_tiny_use[ring][entry].nbufs + 1;
1876 if (count < 0)
1877 break;
1878
1879 if (netif_msg_tx_done(cp))
1880 printk(KERN_DEBUG "%s: tx[%d] done, slot %d\n",
1881 cp->dev->name, ring, entry);
1882
1883 skbs[entry] = NULL;
1884 cp->tx_tiny_use[ring][entry].nbufs = 0;
1885
1886 for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
1887 struct cas_tx_desc *txd = txds + entry;
1888
1889 daddr = le64_to_cpu(txd->buffer);
1890 dlen = CAS_VAL(TX_DESC_BUFLEN,
1891 le64_to_cpu(txd->control));
1892 pci_unmap_page(cp->pdev, daddr, dlen,
1893 PCI_DMA_TODEVICE);
1894 entry = TX_DESC_NEXT(ring, entry);
1895
1896 /* tiny buffer may follow */
1897 if (cp->tx_tiny_use[ring][entry].used) {
1898 cp->tx_tiny_use[ring][entry].used = 0;
1899 entry = TX_DESC_NEXT(ring, entry);
1900 }
1901 }
1902
1903 spin_lock(&cp->stat_lock[ring]);
1904 cp->net_stats[ring].tx_packets++;
1905 cp->net_stats[ring].tx_bytes += skb->len;
1906 spin_unlock(&cp->stat_lock[ring]);
1907 dev_kfree_skb_irq(skb);
1908 }
1909 cp->tx_old[ring] = entry;
1910
1911 /* this is wrong for multiple tx rings. the net device needs
1912 * multiple queues for this to do the right thing. we wait
1913 * for 2*packets to be available when using tiny buffers
1914 */
1915 if (netif_queue_stopped(dev) &&
1916 (TX_BUFFS_AVAIL(cp, ring) > CAS_TABORT(cp)*(MAX_SKB_FRAGS + 1)))
1917 netif_wake_queue(dev);
1918 spin_unlock(&cp->tx_lock[ring]);
1919}
1920
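/* service TX completions for all rings, using either the completion
 * writeback block (USE_TX_COMPWB) or the per-ring completion registers.
 */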
1921static void cas_tx(struct net_device *dev, struct cas *cp,
1922 u32 status)
1923{
1924 int limit, ring;
1925#ifdef USE_TX_COMPWB
1926 u64 compwb = le64_to_cpu(cp->init_block->tx_compwb);
1927#endif
1928 if (netif_msg_intr(cp))
1929 printk(KERN_DEBUG "%s: tx interrupt, status: 0x%x, %lx\n",
1930 cp->dev->name, status, compwb);
1931 /* process all the rings */
1932 for (ring = 0; ring < N_TX_RINGS; ring++) {
1933#ifdef USE_TX_COMPWB
1934 /* use the completion writeback registers */
1935 limit = (CAS_VAL(TX_COMPWB_MSB, compwb) << 8) |
1936 CAS_VAL(TX_COMPWB_LSB, compwb);
1937 compwb = TX_COMPWB_NEXT(compwb);
1938#else
1939 limit = readl(cp->regs + REG_TX_COMPN(ring));
1940#endif
1941 if (cp->tx_old[ring] != limit)
1942 cas_tx_ringN(cp, ring, limit);
1943 }
1944}
1945
1946
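/* build an skb for one received packet described by the completion-ring
 * words: the header (and small packets) are copied into the skb's linear
 * area, the rest of the data is attached as page fragments. returns the
 * packet length, or -1 on allocation failure or page overflow.
 */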
1947static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
1948 int entry, const u64 *words,
1949 struct sk_buff **skbref)
1950{
1951 int dlen, hlen, len, i, alloclen;
1952 int off, swivel = RX_SWIVEL_OFF_VAL;
1953 struct cas_page *page;
1954 struct sk_buff *skb;
1955 void *addr, *crcaddr;
1956 char *p;
1957
1958 hlen = CAS_VAL(RX_COMP2_HDR_SIZE, words[1]);
1959 dlen = CAS_VAL(RX_COMP1_DATA_SIZE, words[0]);
1960 len = hlen + dlen;
1961
1962 if (RX_COPY_ALWAYS || (words[2] & RX_COMP3_SMALL_PKT))
1963 alloclen = len;
1964 else
1965 alloclen = max(hlen, RX_COPY_MIN);
1966
1967 skb = dev_alloc_skb(alloclen + swivel + cp->crc_size);
1968 if (skb == NULL)
1969 return -1;
1970
1971 *skbref = skb;
1972 skb->dev = cp->dev;
1973 skb_reserve(skb, swivel);
1974
1975 p = skb->data;
1976 addr = crcaddr = NULL;
1977 if (hlen) { /* always copy header pages */
1978 i = CAS_VAL(RX_COMP2_HDR_INDEX, words[1]);
1979 page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
1980 off = CAS_VAL(RX_COMP2_HDR_OFF, words[1]) * 0x100 +
1981 swivel;
1982
1983 i = hlen;
1984 if (!dlen) /* attach FCS */
1985 i += cp->crc_size;
1986 pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr + off, i,
1987 PCI_DMA_FROMDEVICE);
1988 addr = cas_page_map(page->buffer);
1989 memcpy(p, addr + off, i);
1990 pci_dma_sync_single_for_device(cp->pdev, page->dma_addr + off, i,
1991 PCI_DMA_FROMDEVICE);
1992 cas_page_unmap(addr);
1993 RX_USED_ADD(page, 0x100);
1994 p += hlen;
1995 swivel = 0;
1996 }
1997
1998
1999 if (alloclen < (hlen + dlen)) {
2000 skb_frag_t *frag = skb_shinfo(skb)->frags;
2001
2002 /* normal or jumbo packets. we use frags */
2003 i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]);
2004 page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
2005 off = CAS_VAL(RX_COMP1_DATA_OFF, words[0]) + swivel;
2006
2007 hlen = min(cp->page_size - off, dlen);
2008 if (hlen < 0) {
2009 if (netif_msg_rx_err(cp)) {
2010 printk(KERN_DEBUG "%s: rx page overflow: "
2011 "%d\n", cp->dev->name, hlen);
2012 }
2013 dev_kfree_skb_irq(skb);
2014 return -1;
2015 }
2016 i = hlen;
2017 if (i == dlen) /* attach FCS */
2018 i += cp->crc_size;
2019 pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr + off, i,
2020 PCI_DMA_FROMDEVICE);
2021
2022 /* make sure we always copy a header */
2023 swivel = 0;
2024 if (p == (char *) skb->data) { /* not split */
2025 addr = cas_page_map(page->buffer);
2026 memcpy(p, addr + off, RX_COPY_MIN);
2027 pci_dma_sync_single_for_device(cp->pdev, page->dma_addr + off, i,
2028 PCI_DMA_FROMDEVICE);
2029 cas_page_unmap(addr);
2030 off += RX_COPY_MIN;
2031 swivel = RX_COPY_MIN;
2032 RX_USED_ADD(page, cp->mtu_stride);
2033 } else {
2034 RX_USED_ADD(page, hlen);
2035 }
2036 skb_put(skb, alloclen);
2037
2038 skb_shinfo(skb)->nr_frags++;
2039 skb->data_len += hlen - swivel;
2040 skb->len += hlen - swivel;
2041
2042 get_page(page->buffer);
2043 frag->page = page->buffer;
2044 frag->page_offset = off;
2045 frag->size = hlen - swivel;
2046
2047 /* any more data? */
2048 if ((words[0] & RX_COMP1_SPLIT_PKT) && ((dlen -= hlen) > 0)) {
2049 hlen = dlen;
2050 off = 0;
2051
2052 i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]);
2053 page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
2054 pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr,
2055 hlen + cp->crc_size,
2056 PCI_DMA_FROMDEVICE);
2057 pci_dma_sync_single_for_device(cp->pdev, page->dma_addr,
2058 hlen + cp->crc_size,
2059 PCI_DMA_FROMDEVICE);
2060
2061 skb_shinfo(skb)->nr_frags++;
2062 skb->data_len += hlen;
2063 skb->len += hlen;
2064 frag++;
2065
2066 get_page(page->buffer);
2067 frag->page = page->buffer;
2068 frag->page_offset = 0;
2069 frag->size = hlen;
2070 RX_USED_ADD(page, hlen + cp->crc_size);
2071 }
2072
2073 if (cp->crc_size) {
2074 addr = cas_page_map(page->buffer);
2075 crcaddr = addr + off + hlen;
2076 }
2077
2078 } else {
2079 /* copying packet */
2080 if (!dlen)
2081 goto end_copy_pkt;
2082
2083 i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]);
2084 page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
2085 off = CAS_VAL(RX_COMP1_DATA_OFF, words[0]) + swivel;
2086 hlen = min(cp->page_size - off, dlen);
2087 if (hlen < 0) {
2088 if (netif_msg_rx_err(cp)) {
2089 printk(KERN_DEBUG "%s: rx page overflow: "
2090 "%d\n", cp->dev->name, hlen);
2091 }
2092 dev_kfree_skb_irq(skb);
2093 return -1;
2094 }
2095 i = hlen;
2096 if (i == dlen) /* attach FCS */
2097 i += cp->crc_size;
2098 pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr + off, i,
2099 PCI_DMA_FROMDEVICE);
2100 addr = cas_page_map(page->buffer);
2101 memcpy(p, addr + off, i);
2102 pci_dma_sync_single_for_device(cp->pdev, page->dma_addr + off, i,
2103 PCI_DMA_FROMDEVICE);
2104 cas_page_unmap(addr);
2105 if (p == (char *) skb->data) /* not split */
2106 RX_USED_ADD(page, cp->mtu_stride);
2107 else
2108 RX_USED_ADD(page, i);
2109
2110 /* any more data? */
2111 if ((words[0] & RX_COMP1_SPLIT_PKT) && ((dlen -= hlen) > 0)) {
2112 p += hlen;
2113 i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]);
2114 page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
2115 pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr,
2116 dlen + cp->crc_size,
2117 PCI_DMA_FROMDEVICE);
2118 addr = cas_page_map(page->buffer);
2119 memcpy(p, addr, dlen + cp->crc_size);
2120 pci_dma_sync_single_for_device(cp->pdev, page->dma_addr,
2121 dlen + cp->crc_size,
2122 PCI_DMA_FROMDEVICE);
2123 cas_page_unmap(addr);
2124 RX_USED_ADD(page, dlen + cp->crc_size);
2125 }
2126end_copy_pkt:
2127 if (cp->crc_size) {
2128 addr = NULL;
2129 crcaddr = skb->data + alloclen;
2130 }
2131 skb_put(skb, alloclen);
2132 }
2133
2134 i = CAS_VAL(RX_COMP4_TCP_CSUM, words[3]);
2135 if (cp->crc_size) {
2136 /* checksum includes FCS. strip it out. */
2137 i = csum_fold(csum_partial(crcaddr, cp->crc_size, i));
2138 if (addr)
2139 cas_page_unmap(addr);
2140 }
2141 skb->csum = ntohs(i ^ 0xffff);
2142 skb->ip_summed = CHECKSUM_HW;
2143 skb->protocol = eth_type_trans(skb, cp->dev);
2144 return len;
2145}
2146
2147
2148/* we can handle up to 64 rx flows at a time. we do the same thing
2149 * as nonreassm except that we batch up the buffers.
2150 * NOTE: we currently just treat each flow as a bunch of packets that
2151 * we pass up. a better way would be to coalesce the packets
2152 * into a jumbo packet. to do that, we need to do the following:
2153 * 1) the first packet will have a clean split between header and
2154 * data. save both.
2155 * 2) each time the next flow packet comes in, extend the
2156 * data length and merge the checksums.
2157 * 3) on flow release, fix up the header.
2158 * 4) make sure the higher layer doesn't care.
2159 * because packets get coalesced, we shouldn't run into fragment count
2160 * issues.
2161 */
2162static inline void cas_rx_flow_pkt(struct cas *cp, const u64 *words,
2163 struct sk_buff *skb)
2164{
2165 int flowid = CAS_VAL(RX_COMP3_FLOWID, words[2]) & (N_RX_FLOWS - 1);
2166 struct sk_buff_head *flow = &cp->rx_flows[flowid];
2167
2168 /* this is protected at a higher layer, so no need to
2169 * do any additional locking here. stick the buffer
2170 * at the end.
2171 */
2172 __skb_insert(skb, flow->prev, (struct sk_buff *) flow, flow);
2173 if (words[0] & RX_COMP1_RELEASE_FLOW) {
2174 while ((skb = __skb_dequeue(flow))) {
2175 cas_skb_release(skb);
2176 }
2177 }
2178}
2179
2180/* put an rx descriptor back on the ring. if the buffer is still in use
2181 * by a higher layer, a replacement page is swapped in.
2182 */
2183static void cas_post_page(struct cas *cp, const int ring, const int index)
2184{
2185 cas_page_t *new;
2186 int entry;
2187
2188 entry = cp->rx_old[ring];
2189
2190 new = cas_page_swap(cp, ring, index);
2191 cp->init_rxds[ring][entry].buffer = cpu_to_le64(new->dma_addr);
2192 cp->init_rxds[ring][entry].index =
2193 cpu_to_le64(CAS_BASE(RX_INDEX_NUM, index) |
2194 CAS_BASE(RX_INDEX_RING, ring));
2195
2196 entry = RX_DESC_ENTRY(ring, entry + 1);
2197 cp->rx_old[ring] = entry;
2198
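 /* only write the kick register once the ring index has advanced to a
 * multiple of 4; partial groups are picked up by a later post.
 */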
2199 if (entry % 4)
2200 return;
2201
2202 if (ring == 0)
2203 writel(entry, cp->regs + REG_RX_KICK);
2204 else if ((N_RX_DESC_RINGS > 1) &&
2205 (cp->cas_flags & CAS_FLAG_REG_PLUS))
2206 writel(entry, cp->regs + REG_PLUS_RX_KICK1);
2207}
2208
2209
2210/* repost rx descriptors; only called when things are bad (rx buffer unavailable or almost-empty interrupts) */
2211static int cas_post_rxds_ringN(struct cas *cp, int ring, int num)
2212{
2213 unsigned int entry, last, count, released;
2214 int cluster;
2215 cas_page_t **page = cp->rx_pages[ring];
2216
2217 entry = cp->rx_old[ring];
2218
2219 if (netif_msg_intr(cp))
2220 printk(KERN_DEBUG "%s: rxd[%d] interrupt, done: %d\n",
2221 cp->dev->name, ring, entry);
2222
2223 cluster = -1;
2224 count = entry & 0x3;
2225 last = RX_DESC_ENTRY(ring, num ? entry + num - 4: entry - 4);
2226 released = 0;
2227 while (entry != last) {
2228 /* make a new buffer if it's still in use */
2229 if (page_count(page[entry]->buffer) > 1) {
2230 cas_page_t *new = cas_page_dequeue(cp);
2231 if (!new) {
2232 /* let the timer know that we need to
2233 * do this again
2234 */
2235 cp->cas_flags |= CAS_FLAG_RXD_POST(ring);
2236 if (!timer_pending(&cp->link_timer))
2237 mod_timer(&cp->link_timer, jiffies +
2238 CAS_LINK_FAST_TIMEOUT);
2239 cp->rx_old[ring] = entry;
2240 cp->rx_last[ring] = num ? num - released : 0;
2241 return -ENOMEM;
2242 }
2243 spin_lock(&cp->rx_inuse_lock);
2244 list_add(&page[entry]->list, &cp->rx_inuse_list);
2245 spin_unlock(&cp->rx_inuse_lock);
2246 cp->init_rxds[ring][entry].buffer =
2247 cpu_to_le64(new->dma_addr);
2248 page[entry] = new;
2249
2250 }
2251
2252 if (++count == 4) {
2253 cluster = entry;
2254 count = 0;
2255 }
2256 released++;
2257 entry = RX_DESC_ENTRY(ring, entry + 1);
2258 }
2259 cp->rx_old[ring] = entry;
2260
2261 if (cluster < 0)
2262 return 0;
2263
2264 if (ring == 0)
2265 writel(cluster, cp->regs + REG_RX_KICK);
2266 else if ((N_RX_DESC_RINGS > 1) &&
2267 (cp->cas_flags & CAS_FLAG_REG_PLUS))
2268 writel(cluster, cp->regs + REG_PLUS_RX_KICK1);
2269 return 0;
2270}
2271
2272
2273/* process a completion ring. packets are set up in three basic ways:
2274 * small packets: header + data copied into a single buffer.
2275 * large packets: header and data in a single buffer.
2276 * split packets: header in a separate buffer from the data.
2277 * data may be in multiple pages. data may be > 256
2278 * bytes but in a single page.
2279 *
2280 * NOTE: RX page posting is done in this routine as well. while there's
2281 * the capability of using multiple RX completion rings, it isn't
2282 * really worthwhile because the page posting will force
2283 * serialization on the single descriptor ring.
2284 */
2285static int cas_rx_ringN(struct cas *cp, int ring, int budget)
2286{
2287 struct cas_rx_comp *rxcs = cp->init_rxcs[ring];
2288 int entry, drops;
2289 int npackets = 0;
2290
2291 if (netif_msg_intr(cp))
2292 printk(KERN_DEBUG "%s: rx[%d] interrupt, done: %d/%d\n",
2293 cp->dev->name, ring,
2294 readl(cp->regs + REG_RX_COMP_HEAD),
2295 cp->rx_new[ring]);
2296
2297 entry = cp->rx_new[ring];
2298 drops = 0;
2299 while (1) {
2300 struct cas_rx_comp *rxc = rxcs + entry;
2301 struct sk_buff *skb;
2302 int type, len;
2303 u64 words[4];
2304 int i, dring;
2305
2306 words[0] = le64_to_cpu(rxc->word1);
2307 words[1] = le64_to_cpu(rxc->word2);
2308 words[2] = le64_to_cpu(rxc->word3);
2309 words[3] = le64_to_cpu(rxc->word4);
2310
2311 /* don't touch if still owned by hw */
2312 type = CAS_VAL(RX_COMP1_TYPE, words[0]);
2313 if (type == 0)
2314 break;
2315
2316 /* hw hasn't cleared the zero bit yet */
2317 if (words[3] & RX_COMP4_ZERO) {
2318 break;
2319 }
2320
2321 /* get info on the packet */
2322 if (words[3] & (RX_COMP4_LEN_MISMATCH | RX_COMP4_BAD)) {
2323 spin_lock(&cp->stat_lock[ring]);
2324 cp->net_stats[ring].rx_errors++;
2325 if (words[3] & RX_COMP4_LEN_MISMATCH)
2326 cp->net_stats[ring].rx_length_errors++;
2327 if (words[3] & RX_COMP4_BAD)
2328 cp->net_stats[ring].rx_crc_errors++;
2329 spin_unlock(&cp->stat_lock[ring]);
2330
2331 /* We'll just return it to Cassini. */
2332 drop_it:
2333 spin_lock(&cp->stat_lock[ring]);
2334 ++cp->net_stats[ring].rx_dropped;
2335 spin_unlock(&cp->stat_lock[ring]);
2336 goto next;
2337 }
2338
2339 len = cas_rx_process_pkt(cp, rxc, entry, words, &skb);
2340 if (len < 0) {
2341 ++drops;
2342 goto drop_it;
2343 }
2344
2345 /* see if it's a flow re-assembly or not. the driver
2346 * itself handles release back up.
2347 */
2348 if (RX_DONT_BATCH || (type == 0x2)) {
2349 /* non-reassm: these always get released */
2350 cas_skb_release(skb);
2351 } else {
2352 cas_rx_flow_pkt(cp, words, skb);
2353 }
2354
2355 spin_lock(&cp->stat_lock[ring]);
2356 cp->net_stats[ring].rx_packets++;
2357 cp->net_stats[ring].rx_bytes += len;
2358 spin_unlock(&cp->stat_lock[ring]);
2359 cp->dev->last_rx = jiffies;
2360
2361 next:
2362 npackets++;
2363
2364 /* should it be released? */
2365 if (words[0] & RX_COMP1_RELEASE_HDR) {
2366 i = CAS_VAL(RX_COMP2_HDR_INDEX, words[1]);
2367 dring = CAS_VAL(RX_INDEX_RING, i);
2368 i = CAS_VAL(RX_INDEX_NUM, i);
2369 cas_post_page(cp, dring, i);
2370 }
2371
2372 if (words[0] & RX_COMP1_RELEASE_DATA) {
2373 i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]);
2374 dring = CAS_VAL(RX_INDEX_RING, i);
2375 i = CAS_VAL(RX_INDEX_NUM, i);
2376 cas_post_page(cp, dring, i);
2377 }
2378
2379 if (words[0] & RX_COMP1_RELEASE_NEXT) {
2380 i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]);
2381 dring = CAS_VAL(RX_INDEX_RING, i);
2382 i = CAS_VAL(RX_INDEX_NUM, i);
2383 cas_post_page(cp, dring, i);
2384 }
2385
2386 /* skip to the next entry */
2387 entry = RX_COMP_ENTRY(ring, entry + 1 +
2388 CAS_VAL(RX_COMP1_SKIP, words[0]));
2389#ifdef USE_NAPI
2390 if (budget && (npackets >= budget))
2391 break;
2392#endif
2393 }
2394 cp->rx_new[ring] = entry;
2395
2396 if (drops)
2397 printk(KERN_INFO "%s: Memory squeeze, deferring packet.\n",
2398 cp->dev->name);
2399 return npackets;
2400}
2401
2402
2403/* put completion entries back on the ring */
2404static void cas_post_rxcs_ringN(struct net_device *dev,
2405 struct cas *cp, int ring)
2406{
2407 struct cas_rx_comp *rxc = cp->init_rxcs[ring];
2408 int last, entry;
2409
2410 last = cp->rx_cur[ring];
2411 entry = cp->rx_new[ring];
2412 if (netif_msg_intr(cp))
2413 printk(KERN_DEBUG "%s: rxc[%d] interrupt, done: %d/%d\n",
2414 dev->name, ring, readl(cp->regs + REG_RX_COMP_HEAD),
2415 entry);
2416
2417 /* zero and re-mark descriptors */
2418 while (last != entry) {
2419 cas_rxc_init(rxc + last);
2420 last = RX_COMP_ENTRY(ring, last + 1);
2421 }
2422 cp->rx_cur[ring] = last;
2423
2424 if (ring == 0)
2425 writel(last, cp->regs + REG_RX_COMP_TAIL);
2426 else if (cp->cas_flags & CAS_FLAG_REG_PLUS)
2427 writel(last, cp->regs + REG_PLUS_RX_COMPN_TAIL(ring));
2428}
2429
2430
2431
2432/* cassini can use all four PCI interrupts for the completion ring.
2433 * rings 3 and 4 are identical
2434 */
2435#if defined(USE_PCI_INTC) || defined(USE_PCI_INTD)
2436static inline void cas_handle_irqN(struct net_device *dev,
2437 struct cas *cp, const u32 status,
2438 const int ring)
2439{
2440 if (status & (INTR_RX_COMP_FULL_ALT | INTR_RX_COMP_AF_ALT))
2441 cas_post_rxcs_ringN(dev, cp, ring);
2442}
2443
2444static irqreturn_t cas_interruptN(int irq, void *dev_id, struct pt_regs *regs)
2445{
2446 struct net_device *dev = dev_id;
2447 struct cas *cp = netdev_priv(dev);
2448 unsigned long flags;
2449 int ring = (irq == cp->pci_irq_INTC) ? 2 : 3;
2450 u32 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(ring));
2451
2452 /* check for shared irq */
2453 if (status == 0)
2454 return IRQ_NONE;
2455
2456 spin_lock_irqsave(&cp->lock, flags);
2457
2458 if (status & INTR_RX_DONE_ALT) { /* handle rx separately */
2459#ifdef USE_NAPI
2460 cas_mask_intr(cp);
2461 netif_rx_schedule(dev);
2462#else
2463 cas_rx_ringN(cp, ring, 0);
2464#endif
2465 status &= ~INTR_RX_DONE_ALT;
2466 }
2467
2468 if (status)
2469 cas_handle_irqN(dev, cp, status, ring);
2470 spin_unlock_irqrestore(&cp->lock, flags);
2471 return IRQ_HANDLED;
2472}
2473#endif
2474
2475#ifdef USE_PCI_INTB
2476/* everything but rx packets */
2477static inline void cas_handle_irq1(struct cas *cp, const u32 status)
2478{
2479 if (status & INTR_RX_BUF_UNAVAIL_1) {
2480 /* Frame arrived, no free RX buffers available.
2481 * NOTE: we can get this on a link transition. */
2482 cas_post_rxds_ringN(cp, 1, 0);
2483 spin_lock(&cp->stat_lock[1]);
2484 cp->net_stats[1].rx_dropped++;
2485 spin_unlock(&cp->stat_lock[1]);
2486 }
2487
2488 if (status & INTR_RX_BUF_AE_1)
2489 cas_post_rxds_ringN(cp, 1, RX_DESC_RINGN_SIZE(1) -
2490 RX_AE_FREEN_VAL(1));
2491
2492 if (status & (INTR_RX_COMP_AF | INTR_RX_COMP_FULL))
2493 cas_post_rxcs_ringN(cp, 1);
2494}
2495
2496/* ring 2 handles a few more events than 3 and 4 */
2497static irqreturn_t cas_interrupt1(int irq, void *dev_id, struct pt_regs *regs)
2498{
2499 struct net_device *dev = dev_id;
2500 struct cas *cp = netdev_priv(dev);
2501 unsigned long flags;
2502 u32 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(1));
2503
2504 /* check for shared interrupt */
2505 if (status == 0)
2506 return IRQ_NONE;
2507
2508 spin_lock_irqsave(&cp->lock, flags);
2509 if (status & INTR_RX_DONE_ALT) { /* handle rx separately */
2510#ifdef USE_NAPI
2511 cas_mask_intr(cp);
2512 netif_rx_schedule(dev);
2513#else
2514 cas_rx_ringN(cp, 1, 0);
2515#endif
2516 status &= ~INTR_RX_DONE_ALT;
2517 }
2518 if (status)
2519 cas_handle_irq1(cp, status);
2520 spin_unlock_irqrestore(&cp->lock, flags);
2521 return IRQ_HANDLED;
2522}
2523#endif
2524
2525static inline void cas_handle_irq(struct net_device *dev,
2526 struct cas *cp, const u32 status)
2527{
2528 /* housekeeping interrupts */
2529 if (status & INTR_ERROR_MASK)
2530 cas_abnormal_irq(dev, cp, status);
2531
2532 if (status & INTR_RX_BUF_UNAVAIL) {
2533 /* Frame arrived, no free RX buffers available.
2534 * NOTE: we can get this on a link transition.
2535 */
2536 cas_post_rxds_ringN(cp, 0, 0);
2537 spin_lock(&cp->stat_lock[0]);
2538 cp->net_stats[0].rx_dropped++;
2539 spin_unlock(&cp->stat_lock[0]);
2540 } else if (status & INTR_RX_BUF_AE) {
2541 cas_post_rxds_ringN(cp, 0, RX_DESC_RINGN_SIZE(0) -
2542 RX_AE_FREEN_VAL(0));
2543 }
2544
2545 if (status & (INTR_RX_COMP_AF | INTR_RX_COMP_FULL))
2546 cas_post_rxcs_ringN(dev, cp, 0);
2547}
2548
2549static irqreturn_t cas_interrupt(int irq, void *dev_id, struct pt_regs *regs)
2550{
2551 struct net_device *dev = dev_id;
2552 struct cas *cp = netdev_priv(dev);
2553 unsigned long flags;
2554 u32 status = readl(cp->regs + REG_INTR_STATUS);
2555
2556 if (status == 0)
2557 return IRQ_NONE;
2558
2559 spin_lock_irqsave(&cp->lock, flags);
2560 if (status & (INTR_TX_ALL | INTR_TX_INTME)) {
2561 cas_tx(dev, cp, status);
2562 status &= ~(INTR_TX_ALL | INTR_TX_INTME);
2563 }
2564
2565 if (status & INTR_RX_DONE) {
2566#ifdef USE_NAPI
2567 cas_mask_intr(cp);
2568 netif_rx_schedule(dev);
2569#else
2570 cas_rx_ringN(cp, 0, 0);
2571#endif
2572 status &= ~INTR_RX_DONE;
2573 }
2574
2575 if (status)
2576 cas_handle_irq(dev, cp, status);
2577 spin_unlock_irqrestore(&cp->lock, flags);
2578 return IRQ_HANDLED;
2579}
2580
2581
2582#ifdef USE_NAPI
2583static int cas_poll(struct net_device *dev, int *budget)
2584{
2585 struct cas *cp = netdev_priv(dev);
2586 int i, enable_intr, todo, credits;
2587 u32 status = readl(cp->regs + REG_INTR_STATUS);
2588 unsigned long flags;
2589
2590 spin_lock_irqsave(&cp->lock, flags);
2591 cas_tx(dev, cp, status);
2592 spin_unlock_irqrestore(&cp->lock, flags);
2593
2594 /* NAPI rx packets. we spread the credits across all of the
2595 * rxc rings
2596 */
2597 todo = min(*budget, dev->quota);
2598
2599 /* to be fair with the work, we loop through each
2600 * ring N_RX_COMP_RINGS times with a request of
2601 * todo / N_RX_COMP_RINGS
2602 */
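 /* e.g. a budget of 64 with 4 completion rings asks each ring for 16
 * packets per inner pass, for at most 4 passes.
 */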
2603 enable_intr = 1;
2604 credits = 0;
2605 for (i = 0; i < N_RX_COMP_RINGS; i++) {
2606 int j;
2607 for (j = 0; j < N_RX_COMP_RINGS; j++) {
2608 credits += cas_rx_ringN(cp, j, todo / N_RX_COMP_RINGS);
2609 if (credits >= todo) {
2610 enable_intr = 0;
2611 goto rx_comp;
2612 }
2613 }
2614 }
2615
2616rx_comp:
2617 *budget -= credits;
2618 dev->quota -= credits;
2619
2620 /* final rx completion */
2621 spin_lock_irqsave(&cp->lock, flags);
2622 if (status)
2623 cas_handle_irq(dev, cp, status);
2624
2625#ifdef USE_PCI_INTB
2626 if (N_RX_COMP_RINGS > 1) {
2627 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(1));
2628 if (status)
2629 cas_handle_irq1(cp, status);
2630 }
2631#endif
2632
2633#ifdef USE_PCI_INTC
2634 if (N_RX_COMP_RINGS > 2) {
2635 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(2));
2636 if (status)
2637 cas_handle_irqN(dev, cp, status, 2);
2638 }
2639#endif
2640
2641#ifdef USE_PCI_INTD
2642 if (N_RX_COMP_RINGS > 3) {
2643 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(3));
2644 if (status)
2645 cas_handle_irqN(dev, cp, status, 3);
2646 }
2647#endif
2648 spin_unlock_irqrestore(&cp->lock, flags);
2649 if (enable_intr) {
2650 netif_rx_complete(dev);
2651 cas_unmask_intr(cp);
2652 return 0;
2653 }
2654 return 1;
2655}
2656#endif
2657
2658#ifdef CONFIG_NET_POLL_CONTROLLER
2659static void cas_netpoll(struct net_device *dev)
2660{
2661 struct cas *cp = netdev_priv(dev);
2662
2663 cas_disable_irq(cp, 0);
2664 cas_interrupt(cp->pdev->irq, dev, NULL);
2665 cas_enable_irq(cp, 0);
2666
2667#ifdef USE_PCI_INTB
2668 if (N_RX_COMP_RINGS > 1) {
2669 /* cas_interrupt1(); */
2670 }
2671#endif
2672#ifdef USE_PCI_INTC
2673 if (N_RX_COMP_RINGS > 2) {
2674 /* cas_interruptN(); */
2675 }
2676#endif
2677#ifdef USE_PCI_INTD
2678 if (N_RX_COMP_RINGS > 3) {
2679 /* cas_interruptN(); */
2680 }
2681#endif
2682}
2683#endif
2684
2685static void cas_tx_timeout(struct net_device *dev)
2686{
2687 struct cas *cp = netdev_priv(dev);
2688
2689 printk(KERN_ERR "%s: transmit timed out, resetting\n", dev->name);
2690 if (!cp->hw_running) {
2691 printk(KERN_ERR "%s: hrm.. hw not running!\n", dev->name);
2692 return;
2693 }
2694
2695 printk(KERN_ERR "%s: MIF_STATE[%08x]\n",
2696 dev->name, readl(cp->regs + REG_MIF_STATE_MACHINE));
2697
2698 printk(KERN_ERR "%s: MAC_STATE[%08x]\n",
2699 dev->name, readl(cp->regs + REG_MAC_STATE_MACHINE));
2700
2701 printk(KERN_ERR "%s: TX_STATE[%08x:%08x:%08x] "
2702 "FIFO[%08x:%08x:%08x] SM1[%08x] SM2[%08x]\n",
2703 dev->name,
2704 readl(cp->regs + REG_TX_CFG),
2705 readl(cp->regs + REG_MAC_TX_STATUS),
2706 readl(cp->regs + REG_MAC_TX_CFG),
2707 readl(cp->regs + REG_TX_FIFO_PKT_CNT),
2708 readl(cp->regs + REG_TX_FIFO_WRITE_PTR),
2709 readl(cp->regs + REG_TX_FIFO_READ_PTR),
2710 readl(cp->regs + REG_TX_SM_1),
2711 readl(cp->regs + REG_TX_SM_2));
2712
2713 printk(KERN_ERR "%s: RX_STATE[%08x:%08x:%08x]\n",
2714 dev->name,
2715 readl(cp->regs + REG_RX_CFG),
2716 readl(cp->regs + REG_MAC_RX_STATUS),
2717 readl(cp->regs + REG_MAC_RX_CFG));
2718
2719 printk(KERN_ERR "%s: HP_STATE[%08x:%08x:%08x:%08x]\n",
2720 dev->name,
2721 readl(cp->regs + REG_HP_STATE_MACHINE),
2722 readl(cp->regs + REG_HP_STATUS0),
2723 readl(cp->regs + REG_HP_STATUS1),
2724 readl(cp->regs + REG_HP_STATUS2));
2725
2726#if 1
2727 atomic_inc(&cp->reset_task_pending);
2728 atomic_inc(&cp->reset_task_pending_all);
2729 schedule_work(&cp->reset_task);
2730#else
2731 atomic_set(&cp->reset_task_pending, CAS_RESET_ALL);
2732 schedule_work(&cp->reset_task);
2733#endif
2734}
2735
2736static inline int cas_intme(int ring, int entry)
2737{
2738 /* Algorithm: IRQ every 1/2 of descriptors. */
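 /* e.g. for a 64-entry ring the mask is 0x1f, so an interrupt is
 * requested for entries 0 and 32.
 */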
2739 if (!(entry & ((TX_DESC_RINGN_SIZE(ring) >> 1) - 1)))
2740 return 1;
2741 return 0;
2742}
2743
2744
2745static void cas_write_txd(struct cas *cp, int ring, int entry,
2746 dma_addr_t mapping, int len, u64 ctrl, int last)
2747{
2748 struct cas_tx_desc *txd = cp->init_txds[ring] + entry;
2749
2750 ctrl |= CAS_BASE(TX_DESC_BUFLEN, len);
2751 if (cas_intme(ring, entry))
2752 ctrl |= TX_DESC_INTME;
2753 if (last)
2754 ctrl |= TX_DESC_EOF;
2755 txd->control = cpu_to_le64(ctrl);
2756 txd->buffer = cpu_to_le64(mapping);
2757}
2758
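/* "tiny" buffer helpers: when cas_calc_tabort() decides a mapping has to
 * be split, cas_xmit_tx_ringN copies the tail of the data into a small
 * preallocated per-entry bounce buffer (tx_tiny_bufs) and sends it from
 * there; tx_tiny_use records which entries hold such copies so that
 * cas_clean_txd can skip over them when unmapping.
 */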
2759static inline void *tx_tiny_buf(struct cas *cp, const int ring,
2760 const int entry)
2761{
2762 return cp->tx_tiny_bufs[ring] + TX_TINY_BUF_LEN*entry;
2763}
2764
2765static inline dma_addr_t tx_tiny_map(struct cas *cp, const int ring,
2766 const int entry, const int tentry)
2767{
2768 cp->tx_tiny_use[ring][tentry].nbufs++;
2769 cp->tx_tiny_use[ring][entry].used = 1;
2770 return cp->tx_tiny_dvma[ring] + TX_TINY_BUF_LEN*entry;
2771}
2772
2773static inline int cas_xmit_tx_ringN(struct cas *cp, int ring,
2774 struct sk_buff *skb)
2775{
2776 struct net_device *dev = cp->dev;
2777 int entry, nr_frags, frag, tabort, tentry;
2778 dma_addr_t mapping;
2779 unsigned long flags;
2780 u64 ctrl;
2781 u32 len;
2782
2783 spin_lock_irqsave(&cp->tx_lock[ring], flags);
2784
2785 /* This is a hard error, log it. */
2786 if (TX_BUFFS_AVAIL(cp, ring) <=
2787 CAS_TABORT(cp)*(skb_shinfo(skb)->nr_frags + 1)) {
2788 netif_stop_queue(dev);
2789 spin_unlock_irqrestore(&cp->tx_lock[ring], flags);
2790 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
2791 "queue awake!\n", dev->name);
2792 return 1;
2793 }
2794
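 /* if the stack requested hw checksumming, tell the chip where the
 * checksum computation starts and where to stuff the result (both as
 * offsets from the start of the packet).
 */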
2795 ctrl = 0;
2796 if (skb->ip_summed == CHECKSUM_HW) {
2797 u64 csum_start_off, csum_stuff_off;
2798
2799 csum_start_off = (u64) (skb->h.raw - skb->data);
2800 csum_stuff_off = (u64) ((skb->h.raw + skb->csum) - skb->data);
2801
2802 ctrl = TX_DESC_CSUM_EN |
2803 CAS_BASE(TX_DESC_CSUM_START, csum_start_off) |
2804 CAS_BASE(TX_DESC_CSUM_STUFF, csum_stuff_off);
2805 }
2806
2807 entry = cp->tx_new[ring];
2808 cp->tx_skbs[ring][entry] = skb;
2809
2810 nr_frags = skb_shinfo(skb)->nr_frags;
2811 len = skb_headlen(skb);
2812 mapping = pci_map_page(cp->pdev, virt_to_page(skb->data),
2813 offset_in_page(skb->data), len,
2814 PCI_DMA_TODEVICE);
2815
2816 tentry = entry;
2817 tabort = cas_calc_tabort(cp, (unsigned long) skb->data, len);
2818 if (unlikely(tabort)) {
2819 /* NOTE: len is always > tabort */
2820 cas_write_txd(cp, ring, entry, mapping, len - tabort,
2821 ctrl | TX_DESC_SOF, 0);
2822 entry = TX_DESC_NEXT(ring, entry);
2823
2824 memcpy(tx_tiny_buf(cp, ring, entry), skb->data +
2825 len - tabort, tabort);
2826 mapping = tx_tiny_map(cp, ring, entry, tentry);
2827 cas_write_txd(cp, ring, entry, mapping, tabort, ctrl,
2828 (nr_frags == 0));
2829 } else {
2830 cas_write_txd(cp, ring, entry, mapping, len, ctrl |
2831 TX_DESC_SOF, (nr_frags == 0));
2832 }
2833 entry = TX_DESC_NEXT(ring, entry);
2834
2835 for (frag = 0; frag < nr_frags; frag++) {
2836 skb_frag_t *fragp = &skb_shinfo(skb)->frags[frag];
2837
2838 len = fragp->size;
2839 mapping = pci_map_page(cp->pdev, fragp->page,
2840 fragp->page_offset, len,
2841 PCI_DMA_TODEVICE);
2842
2843 tabort = cas_calc_tabort(cp, fragp->page_offset, len);
2844 if (unlikely(tabort)) {
2845 void *addr;
2846
2847 /* NOTE: len is always > tabort */
2848 cas_write_txd(cp, ring, entry, mapping, len - tabort,
2849 ctrl, 0);
2850 entry = TX_DESC_NEXT(ring, entry);
2851
2852 addr = cas_page_map(fragp->page);
2853 memcpy(tx_tiny_buf(cp, ring, entry),
2854 addr + fragp->page_offset + len - tabort,
2855 tabort);
2856 cas_page_unmap(addr);
2857 mapping = tx_tiny_map(cp, ring, entry, tentry);
2858 len = tabort;
2859 }
2860
2861 cas_write_txd(cp, ring, entry, mapping, len, ctrl,
2862 (frag + 1 == nr_frags));
2863 entry = TX_DESC_NEXT(ring, entry);
2864 }
2865
2866 cp->tx_new[ring] = entry;
2867 if (TX_BUFFS_AVAIL(cp, ring) <= CAS_TABORT(cp)*(MAX_SKB_FRAGS + 1))
2868 netif_stop_queue(dev);
2869
2870 if (netif_msg_tx_queued(cp))
2871 printk(KERN_DEBUG "%s: tx[%d] queued, slot %d, skblen %d, "
2872 "avail %d\n",
2873 dev->name, ring, entry, skb->len,
2874 TX_BUFFS_AVAIL(cp, ring));
2875 writel(entry, cp->regs + REG_TX_KICKN(ring));
2876 spin_unlock_irqrestore(&cp->tx_lock[ring], flags);
2877 return 0;
2878}
2879
2880static int cas_start_xmit(struct sk_buff *skb, struct net_device *dev)
2881{
2882 struct cas *cp = netdev_priv(dev);
2883
2884 /* this is only used as a load-balancing hint, so it doesn't
2885 * need to be SMP safe
2886 */
2887 static int ring;
2888
2889 skb = skb_padto(skb, cp->min_frame_size);
2890 if (!skb)
2891 return 0;
2892
2893 /* XXX: we need some higher-level QoS hooks to steer packets to
2894 * individual queues.
2895 */
2896 if (cas_xmit_tx_ringN(cp, ring++ & N_TX_RINGS_MASK, skb))
2897 return 1;
2898 dev->trans_start = jiffies;
2899 return 0;
2900}
2901
2902static void cas_init_tx_dma(struct cas *cp)
2903{
2904 u64 desc_dma = cp->block_dvma;
2905 unsigned long off;
2906 u32 val;
2907 int i;
2908
2909 /* set up tx completion writeback registers. must be 8-byte aligned */
2910#ifdef USE_TX_COMPWB
2911 off = offsetof(struct cas_init_block, tx_compwb);
2912 writel((desc_dma + off) >> 32, cp->regs + REG_TX_COMPWB_DB_HI);
2913 writel((desc_dma + off) & 0xffffffff, cp->regs + REG_TX_COMPWB_DB_LOW);
2914#endif
2915
2916 /* enable completion writebacks, enable paced mode,
2917 * disable read pipe, and disable pre-interrupt compwbs
2918 */
2919 val = TX_CFG_COMPWB_Q1 | TX_CFG_COMPWB_Q2 |
2920 TX_CFG_COMPWB_Q3 | TX_CFG_COMPWB_Q4 |
2921 TX_CFG_DMA_RDPIPE_DIS | TX_CFG_PACED_MODE |
2922 TX_CFG_INTR_COMPWB_DIS;
2923
2924 /* write out tx ring info and tx desc bases */
2925 for (i = 0; i < MAX_TX_RINGS; i++) {
2926 off = (unsigned long) cp->init_txds[i] -
2927 (unsigned long) cp->init_block;
2928
2929 val |= CAS_TX_RINGN_BASE(i);
2930 writel((desc_dma + off) >> 32, cp->regs + REG_TX_DBN_HI(i));
2931 writel((desc_dma + off) & 0xffffffff, cp->regs +
2932 REG_TX_DBN_LOW(i));
2933 /* don't zero out the kick register here as the system
2934 * will wedge
2935 */
2936 }
2937 writel(val, cp->regs + REG_TX_CFG);
2938
2939 /* program max burst sizes. these numbers should be different
2940 * if doing QoS.
2941 */
2942#ifdef USE_QOS
2943 writel(0x800, cp->regs + REG_TX_MAXBURST_0);
2944 writel(0x1600, cp->regs + REG_TX_MAXBURST_1);
2945 writel(0x2400, cp->regs + REG_TX_MAXBURST_2);
2946 writel(0x4800, cp->regs + REG_TX_MAXBURST_3);
2947#else
2948 writel(0x800, cp->regs + REG_TX_MAXBURST_0);
2949 writel(0x800, cp->regs + REG_TX_MAXBURST_1);
2950 writel(0x800, cp->regs + REG_TX_MAXBURST_2);
2951 writel(0x800, cp->regs + REG_TX_MAXBURST_3);
2952#endif
2953}
2954
2955/* Must be invoked under cp->lock. */
2956static inline void cas_init_dma(struct cas *cp)
2957{
2958 cas_init_tx_dma(cp);
2959 cas_init_rx_dma(cp);
2960}
2961
2962/* Must be invoked under cp->lock. */
2963static u32 cas_setup_multicast(struct cas *cp)
2964{
2965 u32 rxcfg = 0;
2966 int i;
2967
2968 if (cp->dev->flags & IFF_PROMISC) {
2969 rxcfg |= MAC_RX_CFG_PROMISC_EN;
2970
2971 } else if (cp->dev->flags & IFF_ALLMULTI) {
2972 for (i=0; i < 16; i++)
2973 writel(0xFFFF, cp->regs + REG_MAC_HASH_TABLEN(i));
2974 rxcfg |= MAC_RX_CFG_HASH_FILTER_EN;
2975
2976 } else {
2977 u16 hash_table[16];
2978 u32 crc;
2979 struct dev_mc_list *dmi = cp->dev->mc_list;
2980 int i;
2981
2982 /* use the alternate mac address registers for the
2983 * first 15 multicast addresses
2984 */
2985 for (i = 1; i <= CAS_MC_EXACT_MATCH_SIZE; i++) {
2986 if (!dmi) {
2987 writel(0x0, cp->regs + REG_MAC_ADDRN(i*3 + 0));
2988 writel(0x0, cp->regs + REG_MAC_ADDRN(i*3 + 1));
2989 writel(0x0, cp->regs + REG_MAC_ADDRN(i*3 + 2));
2990 continue;
2991 }
2992 writel((dmi->dmi_addr[4] << 8) | dmi->dmi_addr[5],
2993 cp->regs + REG_MAC_ADDRN(i*3 + 0));
2994 writel((dmi->dmi_addr[2] << 8) | dmi->dmi_addr[3],
2995 cp->regs + REG_MAC_ADDRN(i*3 + 1));
2996 writel((dmi->dmi_addr[0] << 8) | dmi->dmi_addr[1],
2997 cp->regs + REG_MAC_ADDRN(i*3 + 2));
2998 dmi = dmi->next;
2999 }
3000
3001 /* use hw hash table for the next series of
3002 * multicast addresses
3003 */
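 /* the top 8 bits of the little-endian CRC of the address select one of
 * 256 hash bits: the upper nibble picks one of the 16 registers, the
 * lower nibble the bit within that register.
 */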
3004 memset(hash_table, 0, sizeof(hash_table));
3005 while (dmi) {
3006 crc = ether_crc_le(ETH_ALEN, dmi->dmi_addr);
3007 crc >>= 24;
3008 hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf));
3009 dmi = dmi->next;
3010 }
3011 for (i=0; i < 16; i++)
3012 writel(hash_table[i], cp->regs +
3013 REG_MAC_HASH_TABLEN(i));
3014 rxcfg |= MAC_RX_CFG_HASH_FILTER_EN;
3015 }
3016
3017 return rxcfg;
3018}
3019
3020/* must be invoked under cp->stat_lock[N_TX_RINGS] */
3021static void cas_clear_mac_err(struct cas *cp)
3022{
3023 writel(0, cp->regs + REG_MAC_COLL_NORMAL);
3024 writel(0, cp->regs + REG_MAC_COLL_FIRST);
3025 writel(0, cp->regs + REG_MAC_COLL_EXCESS);
3026 writel(0, cp->regs + REG_MAC_COLL_LATE);
3027 writel(0, cp->regs + REG_MAC_TIMER_DEFER);
3028 writel(0, cp->regs + REG_MAC_ATTEMPTS_PEAK);
3029 writel(0, cp->regs + REG_MAC_RECV_FRAME);
3030 writel(0, cp->regs + REG_MAC_LEN_ERR);
3031 writel(0, cp->regs + REG_MAC_ALIGN_ERR);
3032 writel(0, cp->regs + REG_MAC_FCS_ERR);
3033 writel(0, cp->regs + REG_MAC_RX_CODE_ERR);
3034}
3035
3036
3037static void cas_mac_reset(struct cas *cp)
3038{
3039 int i;
3040
3041 /* do both TX and RX reset */
3042 writel(0x1, cp->regs + REG_MAC_TX_RESET);
3043 writel(0x1, cp->regs + REG_MAC_RX_RESET);
3044
3045 /* wait for TX */
3046 i = STOP_TRIES;
3047 while (i-- > 0) {
3048 if (readl(cp->regs + REG_MAC_TX_RESET) == 0)
3049 break;
3050 udelay(10);
3051 }
3052
3053 /* wait for RX */
3054 i = STOP_TRIES;
3055 while (i-- > 0) {
3056 if (readl(cp->regs + REG_MAC_RX_RESET) == 0)
3057 break;
3058 udelay(10);
3059 }
3060
3061 if (readl(cp->regs + REG_MAC_TX_RESET) |
3062 readl(cp->regs + REG_MAC_RX_RESET))
3063 printk(KERN_ERR "%s: mac tx[%d]/rx[%d] reset failed [%08x]\n",
3064 cp->dev->name, readl(cp->regs + REG_MAC_TX_RESET),
3065 readl(cp->regs + REG_MAC_RX_RESET),
3066 readl(cp->regs + REG_MAC_STATE_MACHINE));
3067}
3068
3069
3070/* Must be invoked under cp->lock. */
3071static void cas_init_mac(struct cas *cp)
3072{
3073 unsigned char *e = &cp->dev->dev_addr[0];
3074 int i;
3075#ifdef CONFIG_CASSINI_MULTICAST_REG_WRITE
3076 u32 rxcfg;
3077#endif
3078 cas_mac_reset(cp);
3079
3080 /* setup core arbitration weight register */
3081 writel(CAWR_RR_DIS, cp->regs + REG_CAWR);
3082
3083 /* XXX Use pci_dma_burst_advice() */
3084#if !defined(CONFIG_SPARC64) && !defined(CONFIG_ALPHA)
3085 /* set the infinite burst register for chips that don't have
3086 * pci issues.
3087 */
3088 if ((cp->cas_flags & CAS_FLAG_TARGET_ABORT) == 0)
3089 writel(INF_BURST_EN, cp->regs + REG_INF_BURST);
3090#endif
3091
3092 writel(0x1BF0, cp->regs + REG_MAC_SEND_PAUSE);
3093
3094 writel(0x00, cp->regs + REG_MAC_IPG0);
3095 writel(0x08, cp->regs + REG_MAC_IPG1);
3096 writel(0x04, cp->regs + REG_MAC_IPG2);
3097
3098 /* change later for 802.3z */
3099 writel(0x40, cp->regs + REG_MAC_SLOT_TIME);
3100
3101 /* min frame + FCS */
3102 writel(ETH_ZLEN + 4, cp->regs + REG_MAC_FRAMESIZE_MIN);
3103
3104 /* Ethernet payload + header + FCS + optional VLAN tag. NOTE: we
3105 * specify the maximum frame size to prevent RX tag errors on
3106 * oversized frames.
3107 */
3108 writel(CAS_BASE(MAC_FRAMESIZE_MAX_BURST, 0x2000) |
3109 CAS_BASE(MAC_FRAMESIZE_MAX_FRAME,
3110 (CAS_MAX_MTU + ETH_HLEN + 4 + 4)),
3111 cp->regs + REG_MAC_FRAMESIZE_MAX);
3112
3113 /* NOTE: crc_size is used as a surrogate for half-duplex.
3114 * workaround saturn half-duplex issue by increasing preamble
3115 * size to 65 bytes.
3116 */
3117 if ((cp->cas_flags & CAS_FLAG_SATURN) && cp->crc_size)
3118 writel(0x41, cp->regs + REG_MAC_PA_SIZE);
3119 else
3120 writel(0x07, cp->regs + REG_MAC_PA_SIZE);
3121 writel(0x04, cp->regs + REG_MAC_JAM_SIZE);
3122 writel(0x10, cp->regs + REG_MAC_ATTEMPT_LIMIT);
3123 writel(0x8808, cp->regs + REG_MAC_CTRL_TYPE);
3124
3125 writel((e[5] | (e[4] << 8)) & 0x3ff, cp->regs + REG_MAC_RANDOM_SEED);
3126
3127 writel(0, cp->regs + REG_MAC_ADDR_FILTER0);
3128 writel(0, cp->regs + REG_MAC_ADDR_FILTER1);
3129 writel(0, cp->regs + REG_MAC_ADDR_FILTER2);
3130 writel(0, cp->regs + REG_MAC_ADDR_FILTER2_1_MASK);
3131 writel(0, cp->regs + REG_MAC_ADDR_FILTER0_MASK);
3132
3133 /* setup mac address in perfect filter array */
3134 for (i = 0; i < 45; i++)
3135 writel(0x0, cp->regs + REG_MAC_ADDRN(i));
3136
3137 writel((e[4] << 8) | e[5], cp->regs + REG_MAC_ADDRN(0));
3138 writel((e[2] << 8) | e[3], cp->regs + REG_MAC_ADDRN(1));
3139 writel((e[0] << 8) | e[1], cp->regs + REG_MAC_ADDRN(2));
3140
3141 writel(0x0001, cp->regs + REG_MAC_ADDRN(42));
3142 writel(0xc200, cp->regs + REG_MAC_ADDRN(43));
3143 writel(0x0180, cp->regs + REG_MAC_ADDRN(44));
3144
3145#ifndef CONFIG_CASSINI_MULTICAST_REG_WRITE
3146 cp->mac_rx_cfg = cas_setup_multicast(cp);
3147#else
3148 /* WTZ: Do what Adrian did in cas_set_multicast. Doing
3149 * a writel does not seem to be necessary because Cassini
3150 * seems to preserve the configuration when we do the reset.
3151 * If the chip is in trouble, though, it is not clear if we
3152 * can really count on this behavior. cas_set_multicast uses
3153 * spin_lock_irqsave, but we are called only in cas_init_hw and
3154 * cas_init_hw is protected by cas_lock_all, which calls
3155 * spin_lock_irq (so it doesn't need to save the flags, and
3156 * we should be OK for the writel, as that is the only
3157 * difference).
3158 */
3159 cp->mac_rx_cfg = rxcfg = cas_setup_multicast(cp);
3160 writel(rxcfg, cp->regs + REG_MAC_RX_CFG);
3161#endif
3162 spin_lock(&cp->stat_lock[N_TX_RINGS]);
3163 cas_clear_mac_err(cp);
3164 spin_unlock(&cp->stat_lock[N_TX_RINGS]);
3165
3166 /* Setup MAC interrupts. We want to get all of the interesting
3167 * counter expiration events, but we do not want to hear about
3168 * normal rx/tx as the DMA engine tells us that.
3169 */
3170 writel(MAC_TX_FRAME_XMIT, cp->regs + REG_MAC_TX_MASK);
3171 writel(MAC_RX_FRAME_RECV, cp->regs + REG_MAC_RX_MASK);
3172
3173 /* Don't enable even the PAUSE interrupts for now, we
3174 * make no use of those events other than to record them.
3175 */
3176 writel(0xffffffff, cp->regs + REG_MAC_CTRL_MASK);
3177}
3178
3179/* Must be invoked under cp->lock. */
3180static void cas_init_pause_thresholds(struct cas *cp)
3181{
3182 /* Calculate pause thresholds. Setting the OFF threshold to the
3183 * full RX fifo size effectively disables PAUSE generation
3184 */
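 /* for fifos larger than 2KB that can hold at least three max-size
 * frames, "off" leaves room for two such frames and "on" sits one
 * more frame below that; otherwise fixed fallback values are used,
 * and tiny fifos get both thresholds set to the fifo size.
 */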
3185 if (cp->rx_fifo_size <= (2 * 1024)) {
3186 cp->rx_pause_off = cp->rx_pause_on = cp->rx_fifo_size;
3187 } else {
3188 int max_frame = (cp->dev->mtu + ETH_HLEN + 4 + 4 + 64) & ~63;
3189 if (max_frame * 3 > cp->rx_fifo_size) {
3190 cp->rx_pause_off = 7104;
3191 cp->rx_pause_on = 960;
3192 } else {
3193 int off = (cp->rx_fifo_size - (max_frame * 2));
3194 int on = off - max_frame;
3195 cp->rx_pause_off = off;
3196 cp->rx_pause_on = on;
3197 }
3198 }
3199}
3200
3201static int cas_vpd_match(const void __iomem *p, const char *str)
3202{
3203 int len = strlen(str) + 1;
3204 int i;
3205
3206 for (i = 0; i < len; i++) {
3207 if (readb(p + i) != str[i])
3208 return 0;
3209 }
3210 return 1;
3211}
3212
3213
3214/* get the mac address by reading the vpd information in the rom.
3215 * also get the phy type and determine if there's an entropy generator.
3216 * NOTE: this is a bit convoluted for the following reasons:
3217 * 1) vpd info has order-dependent mac addresses for multinic cards
3218 * 2) the only way to determine the nic order is to use the slot
3219 * number.
3220 * 3) fiber cards don't have bridges, so their slot numbers don't
3221 * mean anything.
3222 * 4) we don't actually know we have a fiber card until after
3223 * the mac addresses are parsed.
3224 */
3225static int cas_get_vpd_info(struct cas *cp, unsigned char *dev_addr,
3226 const int offset)
3227{
3228 void __iomem *p = cp->regs + REG_EXPANSION_ROM_RUN_START;
3229 void __iomem *base, *kstart;
3230 int i, len;
3231 int found = 0;
3232#define VPD_FOUND_MAC 0x01
3233#define VPD_FOUND_PHY 0x02
3234
3235 int phy_type = CAS_PHY_MII_MDIO0; /* default phy type */
3236 int mac_off = 0;
3237
3238 /* give us access to the PROM */
3239 writel(BIM_LOCAL_DEV_PROM | BIM_LOCAL_DEV_PAD,
3240 cp->regs + REG_BIM_LOCAL_DEV_EN);
3241
3242 /* check for an expansion rom (standard 0x55, 0xaa signature) */
3243 if (readb(p) != 0x55 || readb(p + 1) != 0xaa)
3244 goto use_random_mac_addr;
3245
3246 /* search for beginning of vpd */
3247 base = NULL;
3248 for (i = 2; i < EXPANSION_ROM_SIZE; i++) {
3249 /* check for PCIR */
3250 if ((readb(p + i + 0) == 0x50) &&
3251 (readb(p + i + 1) == 0x43) &&
3252 (readb(p + i + 2) == 0x49) &&
3253 (readb(p + i + 3) == 0x52)) {
3254 base = p + (readb(p + i + 8) |
3255 (readb(p + i + 9) << 8));
3256 break;
3257 }
3258 }
3259
3260 if (!base || (readb(base) != 0x82))
3261 goto use_random_mac_addr;
3262
3263 i = (readb(base + 1) | (readb(base + 2) << 8)) + 3;
3264 while (i < EXPANSION_ROM_SIZE) {
3265 if (readb(base + i) != 0x90) /* no vpd found */
3266 goto use_random_mac_addr;
3267
3268 /* found a vpd field */
3269 len = readb(base + i + 1) | (readb(base + i + 2) << 8);
3270
3271 /* extract keywords */
3272 kstart = base + i + 3;
3273 p = kstart;
3274 while ((p - kstart) < len) {
3275 int klen = readb(p + 2);
3276 int j;
3277 char type;
3278
3279 p += 3;
3280
3281 /* look for the following things:
3282 * -- correct length == 29
3283 * 3 (type) + 2 (size) +
3284 * 18 (strlen("local-mac-address") + 1) +
3285 * 6 (mac addr)
3286 * -- VPD Instance 'I'
3287 * -- VPD Type Bytes 'B'
3288 * -- VPD data length == 6
3289 * -- property string == local-mac-address
3290 *
3291 * -- correct length == 24
3292 * 3 (type) + 2 (size) +
3293 * 12 (strlen("entropy-dev") + 1) +
3294 * 7 (strlen("vms110") + 1)
3295 * -- VPD Instance 'I'
3296 * -- VPD Type String 'S'
3297 * -- VPD data length == 7
3298 * -- property string == entropy-dev
3299 *
3300 * -- correct length == 18
3301 * 3 (type) + 2 (size) +
3302 * 9 (strlen("phy-type") + 1) +
3303 * 4 (strlen("pcs") + 1)
3304 * -- VPD Instance 'I'
3305 * -- VPD Type String 'S'
3306 * -- VPD data length == 4
3307 * -- property string == phy-type
3308 *
3309 * -- correct length == 23
3310 * 3 (type) + 2 (size) +
3311 * 14 (strlen("phy-interface") + 1) +
3312 * 4 (strlen("pcs") + 1)
3313 * -- VPD Instance 'I'
3314 * -- VPD Type String 'S'
3315 * -- VPD data length == 4
3316 * -- property string == phy-interface
3317 */
3318 if (readb(p) != 'I')
3319 goto next;
3320
3321 /* finally, check string and length */
3322 type = readb(p + 3);
3323 if (type == 'B') {
3324 if ((klen == 29) && readb(p + 4) == 6 &&
3325 cas_vpd_match(p + 5,
3326 "local-mac-address")) {
3327 if (mac_off++ > offset)
3328 goto next;
3329
3330 /* set mac address */
3331 for (j = 0; j < 6; j++)
3332 dev_addr[j] =
3333 readb(p + 23 + j);
3334 goto found_mac;
3335 }
3336 }
3337
3338 if (type != 'S')
3339 goto next;
3340
3341#ifdef USE_ENTROPY_DEV
3342 if ((klen == 24) &&
3343 cas_vpd_match(p + 5, "entropy-dev") &&
3344 cas_vpd_match(p + 17, "vms110")) {
3345 cp->cas_flags |= CAS_FLAG_ENTROPY_DEV;
3346 goto next;
3347 }
3348#endif
3349
3350 if (found & VPD_FOUND_PHY)
3351 goto next;
3352
3353 if ((klen == 18) && readb(p + 4) == 4 &&
3354 cas_vpd_match(p + 5, "phy-type")) {
3355 if (cas_vpd_match(p + 14, "pcs")) {
3356 phy_type = CAS_PHY_SERDES;
3357 goto found_phy;
3358 }
3359 }
3360
3361 if ((klen == 23) && readb(p + 4) == 4 &&
3362 cas_vpd_match(p + 5, "phy-interface")) {
3363 if (cas_vpd_match(p + 19, "pcs")) {
3364 phy_type = CAS_PHY_SERDES;
3365 goto found_phy;
3366 }
3367 }
3368found_mac:
3369 found |= VPD_FOUND_MAC;
3370 goto next;
3371
3372found_phy:
3373 found |= VPD_FOUND_PHY;
3374
3375next:
3376 p += klen;
3377 }
3378 i += len + 3;
3379 }
3380
3381use_random_mac_addr:
3382 if (found & VPD_FOUND_MAC)
3383 goto done;
3384
3385 /* Sun MAC prefix then 3 random bytes. */
3386 printk(PFX "MAC address not found in ROM VPD\n");
3387 dev_addr[0] = 0x08;
3388 dev_addr[1] = 0x00;
3389 dev_addr[2] = 0x20;
3390 get_random_bytes(dev_addr + 3, 3);
3391
3392done:
3393 writel(0, cp->regs + REG_BIM_LOCAL_DEV_EN);
3394 return phy_type;
3395}
3396
3397/* check pci invariants */
3398static void cas_check_pci_invariants(struct cas *cp)
3399{
3400 struct pci_dev *pdev = cp->pdev;
3401 u8 rev;
3402
3403 cp->cas_flags = 0;
3404 pci_read_config_byte(pdev, PCI_REVISION_ID, &rev);
3405 if ((pdev->vendor == PCI_VENDOR_ID_SUN) &&
3406 (pdev->device == PCI_DEVICE_ID_SUN_CASSINI)) {
3407 if (rev >= CAS_ID_REVPLUS)
3408 cp->cas_flags |= CAS_FLAG_REG_PLUS;
3409 if (rev < CAS_ID_REVPLUS02u)
3410 cp->cas_flags |= CAS_FLAG_TARGET_ABORT;
3411
3412 /* Original Cassini supports HW CSUM, but it's not
3413 * enabled by default as it can trigger TX hangs.
3414 */
3415 if (rev < CAS_ID_REV2)
3416 cp->cas_flags |= CAS_FLAG_NO_HW_CSUM;
3417 } else {
3418 /* Only sun has original cassini chips. */
3419 cp->cas_flags |= CAS_FLAG_REG_PLUS;
3420
3421 /* We use a flag because the same phy might be externally
3422 * connected.
3423 */
3424 if ((pdev->vendor == PCI_VENDOR_ID_NS) &&
3425 (pdev->device == PCI_DEVICE_ID_NS_SATURN))
3426 cp->cas_flags |= CAS_FLAG_SATURN;
3427 }
3428}
3429
3430
3431static int cas_check_invariants(struct cas *cp)
3432{
3433 struct pci_dev *pdev = cp->pdev;
3434 u32 cfg;
3435 int i;
3436
3437 /* get page size for rx buffers. */
3438 cp->page_order = 0;
3439#ifdef USE_PAGE_ORDER
3440 if (PAGE_SHIFT < CAS_JUMBO_PAGE_SHIFT) {
3441 /* see if we can allocate larger pages */
3442 struct page *page = alloc_pages(GFP_ATOMIC,
3443 CAS_JUMBO_PAGE_SHIFT -
3444 PAGE_SHIFT);
3445 if (page) {
3446 __free_pages(page, CAS_JUMBO_PAGE_SHIFT - PAGE_SHIFT);
3447 cp->page_order = CAS_JUMBO_PAGE_SHIFT - PAGE_SHIFT;
3448 } else {
3449 printk(PFX "MTU limited to %d bytes\n", CAS_MAX_MTU);
3450 }
3451 }
3452#endif
3453 cp->page_size = (PAGE_SIZE << cp->page_order);
3454
3455 /* Fetch the FIFO configurations. */
3456 cp->tx_fifo_size = readl(cp->regs + REG_TX_FIFO_SIZE) * 64;
3457 cp->rx_fifo_size = RX_FIFO_SIZE;
3458
3459 /* finish phy determination. MDIO1 takes precedence over MDIO0 if
3460 * they're both connected.
3461 */
3462 cp->phy_type = cas_get_vpd_info(cp, cp->dev->dev_addr,
3463 PCI_SLOT(pdev->devfn));
3464 if (cp->phy_type & CAS_PHY_SERDES) {
3465 cp->cas_flags |= CAS_FLAG_1000MB_CAP;
3466 return 0; /* no more checking needed */
3467 }
3468
3469 /* MII */
3470 cfg = readl(cp->regs + REG_MIF_CFG);
3471 if (cfg & MIF_CFG_MDIO_1) {
3472 cp->phy_type = CAS_PHY_MII_MDIO1;
3473 } else if (cfg & MIF_CFG_MDIO_0) {
3474 cp->phy_type = CAS_PHY_MII_MDIO0;
3475 }
3476
3477 cas_mif_poll(cp, 0);
3478 writel(PCS_DATAPATH_MODE_MII, cp->regs + REG_PCS_DATAPATH_MODE);
3479
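 /* probe all 32 MII addresses, retrying each one a few times, until a
 * phy responds with a usable (non-zero, non-0xffffffff) ID.
 */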
3480 for (i = 0; i < 32; i++) {
3481 u32 phy_id;
3482 int j;
3483
3484 for (j = 0; j < 3; j++) {
3485 cp->phy_addr = i;
3486 phy_id = cas_phy_read(cp, MII_PHYSID1) << 16;
3487 phy_id |= cas_phy_read(cp, MII_PHYSID2);
3488 if (phy_id && (phy_id != 0xFFFFFFFF)) {
3489 cp->phy_id = phy_id;
3490 goto done;
3491 }
3492 }
3493 }
3494 printk(KERN_ERR PFX "MII phy did not respond [%08x]\n",
3495 readl(cp->regs + REG_MIF_STATE_MACHINE));
3496 return -1;
3497
3498done:
3499 /* see if we can do gigabit */
3500 cfg = cas_phy_read(cp, MII_BMSR);
3501 if ((cfg & CAS_BMSR_1000_EXTEND) &&
3502 cas_phy_read(cp, CAS_MII_1000_EXTEND))
3503 cp->cas_flags |= CAS_FLAG_1000MB_CAP;
3504 return 0;
3505}
3506
3507/* Must be invoked under cp->lock. */
3508static inline void cas_start_dma(struct cas *cp)
3509{
3510 int i;
3511 u32 val;
3512 int txfailed = 0;
3513
3514 /* enable dma */
3515 val = readl(cp->regs + REG_TX_CFG) | TX_CFG_DMA_EN;
3516 writel(val, cp->regs + REG_TX_CFG);
3517 val = readl(cp->regs + REG_RX_CFG) | RX_CFG_DMA_EN;
3518 writel(val, cp->regs + REG_RX_CFG);
3519
3520 /* enable the mac */
3521 val = readl(cp->regs + REG_MAC_TX_CFG) | MAC_TX_CFG_EN;
3522 writel(val, cp->regs + REG_MAC_TX_CFG);
3523 val = readl(cp->regs + REG_MAC_RX_CFG) | MAC_RX_CFG_EN;
3524 writel(val, cp->regs + REG_MAC_RX_CFG);
3525
3526 i = STOP_TRIES;
3527 while (i-- > 0) {
3528 val = readl(cp->regs + REG_MAC_TX_CFG);
3529 if ((val & MAC_TX_CFG_EN))
3530 break;
3531 udelay(10);
3532 }
3533 if (i < 0) txfailed = 1;
3534 i = STOP_TRIES;
3535 while (i-- > 0) {
3536 val = readl(cp->regs + REG_MAC_RX_CFG);
3537 if ((val & MAC_RX_CFG_EN)) {
3538 if (txfailed) {
3539 printk(KERN_ERR
3540 "%s: enabling mac failed [tx:%08x:%08x].\n",
3541 cp->dev->name,
3542 readl(cp->regs + REG_MIF_STATE_MACHINE),
3543 readl(cp->regs + REG_MAC_STATE_MACHINE));
3544 }
3545 goto enable_rx_done;
3546 }
3547 udelay(10);
3548 }
3549 printk(KERN_ERR "%s: enabling mac failed [%s:%08x:%08x].\n",
3550 cp->dev->name,
3551 (txfailed? "tx,rx":"rx"),
3552 readl(cp->regs + REG_MIF_STATE_MACHINE),
3553 readl(cp->regs + REG_MAC_STATE_MACHINE));
3554
3555enable_rx_done:
3556 cas_unmask_intr(cp); /* enable interrupts */
3557 writel(RX_DESC_RINGN_SIZE(0) - 4, cp->regs + REG_RX_KICK);
3558 writel(0, cp->regs + REG_RX_COMP_TAIL);
3559
3560 if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
3561 if (N_RX_DESC_RINGS > 1)
3562 writel(RX_DESC_RINGN_SIZE(1) - 4,
3563 cp->regs + REG_PLUS_RX_KICK1);
3564
3565 for (i = 1; i < N_RX_COMP_RINGS; i++)
3566 writel(0, cp->regs + REG_PLUS_RX_COMPN_TAIL(i));
3567 }
3568}
3569
3570/* Must be invoked under cp->lock. */
3571static void cas_read_pcs_link_mode(struct cas *cp, int *fd, int *spd,
3572 int *pause)
3573{
3574 u32 val = readl(cp->regs + REG_PCS_MII_LPA);
3575 *fd = (val & PCS_MII_LPA_FD) ? 1 : 0;
3576 *pause = (val & PCS_MII_LPA_SYM_PAUSE) ? 0x01 : 0x00;
3577 if (val & PCS_MII_LPA_ASYM_PAUSE)
3578 *pause |= 0x10;
3579 *spd = 1000;
3580}
3581
3582/* Must be invoked under cp->lock. */
3583static void cas_read_mii_link_mode(struct cas *cp, int *fd, int *spd,
3584 int *pause)
3585{
3586 u32 val;
3587
3588 *fd = 0;
3589 *spd = 10;
3590 *pause = 0;
3591
3592 /* use GMII registers */
3593 val = cas_phy_read(cp, MII_LPA);
3594 if (val & CAS_LPA_PAUSE)
3595 *pause = 0x01;
3596
3597 if (val & CAS_LPA_ASYM_PAUSE)
3598 *pause |= 0x10;
3599
3600 if (val & LPA_DUPLEX)
3601 *fd = 1;
3602 if (val & LPA_100)
3603 *spd = 100;
3604
3605 if (cp->cas_flags & CAS_FLAG_1000MB_CAP) {
3606 val = cas_phy_read(cp, CAS_MII_1000_STATUS);
3607 if (val & (CAS_LPA_1000FULL | CAS_LPA_1000HALF))
3608 *spd = 1000;
3609 if (val & CAS_LPA_1000FULL)
3610 *fd = 1;
3611 }
3612}
3613
3614/* A link-up condition has occurred, initialize and enable the
3615 * rest of the chip.
3616 *
3617 * Must be invoked under cp->lock.
3618 */
3619static void cas_set_link_modes(struct cas *cp)
3620{
3621 u32 val;
3622 int full_duplex, speed, pause;
3623
3624 full_duplex = 0;
3625 speed = 10;
3626 pause = 0;
3627
3628 if (CAS_PHY_MII(cp->phy_type)) {
3629 cas_mif_poll(cp, 0);
3630 val = cas_phy_read(cp, MII_BMCR);
3631 if (val & BMCR_ANENABLE) {
3632 cas_read_mii_link_mode(cp, &full_duplex, &speed,
3633 &pause);
3634 } else {
3635 if (val & BMCR_FULLDPLX)
3636 full_duplex = 1;
3637
3638 if (val & BMCR_SPEED100)
3639 speed = 100;
3640 else if (val & CAS_BMCR_SPEED1000)
3641 speed = (cp->cas_flags & CAS_FLAG_1000MB_CAP) ?
3642 1000 : 100;
3643 }
3644 cas_mif_poll(cp, 1);
3645
3646 } else {
3647 val = readl(cp->regs + REG_PCS_MII_CTRL);
3648 cas_read_pcs_link_mode(cp, &full_duplex, &speed, &pause);
3649 if ((val & PCS_MII_AUTONEG_EN) == 0) {
3650 if (val & PCS_MII_CTRL_DUPLEX)
3651 full_duplex = 1;
3652 }
3653 }
3654
3655 if (netif_msg_link(cp))
3656 printk(KERN_INFO "%s: Link up at %d Mbps, %s-duplex.\n",
3657 cp->dev->name, speed, (full_duplex ? "full" : "half"));
3658
3659 val = MAC_XIF_TX_MII_OUTPUT_EN | MAC_XIF_LINK_LED;
3660 if (CAS_PHY_MII(cp->phy_type)) {
3661 val |= MAC_XIF_MII_BUFFER_OUTPUT_EN;
3662 if (!full_duplex)
3663 val |= MAC_XIF_DISABLE_ECHO;
3664 }
3665 if (full_duplex)
3666 val |= MAC_XIF_FDPLX_LED;
3667 if (speed == 1000)
3668 val |= MAC_XIF_GMII_MODE;
3669 writel(val, cp->regs + REG_MAC_XIF_CFG);
3670
3671 /* deal with carrier and collision detect. */
3672 val = MAC_TX_CFG_IPG_EN;
3673 if (full_duplex) {
3674 val |= MAC_TX_CFG_IGNORE_CARRIER;
3675 val |= MAC_TX_CFG_IGNORE_COLL;
3676 } else {
3677#ifndef USE_CSMA_CD_PROTO
3678 val |= MAC_TX_CFG_NEVER_GIVE_UP_EN;
3679 val |= MAC_TX_CFG_NEVER_GIVE_UP_LIM;
3680#endif
3681 }
3682 /* val now set up for REG_MAC_TX_CFG */
3683
3684 /* If gigabit and half-duplex, enable carrier extension
3685 * mode. increase slot time to 512 bytes as well.
3686 * else, disable it and make sure slot time is 64 bytes.
3687 * also activate checksum bug workaround
3688 */
3689 if ((speed == 1000) && !full_duplex) {
3690 writel(val | MAC_TX_CFG_CARRIER_EXTEND,
3691 cp->regs + REG_MAC_TX_CFG);
3692
3693 val = readl(cp->regs + REG_MAC_RX_CFG);
3694 val &= ~MAC_RX_CFG_STRIP_FCS; /* checksum workaround */
3695 writel(val | MAC_RX_CFG_CARRIER_EXTEND,
3696 cp->regs + REG_MAC_RX_CFG);
3697
3698 writel(0x200, cp->regs + REG_MAC_SLOT_TIME);
3699
3700 cp->crc_size = 4;
3701 /* minimum size gigabit frame at half duplex */
3702 cp->min_frame_size = CAS_1000MB_MIN_FRAME;
3703
3704 } else {
3705 writel(val, cp->regs + REG_MAC_TX_CFG);
3706
3707 /* checksum bug workaround. don't strip FCS when in
3708 * half-duplex mode
3709 */
3710 val = readl(cp->regs + REG_MAC_RX_CFG);
3711 if (full_duplex) {
3712 val |= MAC_RX_CFG_STRIP_FCS;
3713 cp->crc_size = 0;
3714 cp->min_frame_size = CAS_MIN_MTU;
3715 } else {
3716 val &= ~MAC_RX_CFG_STRIP_FCS;
3717 cp->crc_size = 4;
3718 cp->min_frame_size = CAS_MIN_FRAME;
3719 }
3720 writel(val & ~MAC_RX_CFG_CARRIER_EXTEND,
3721 cp->regs + REG_MAC_RX_CFG);
3722 writel(0x40, cp->regs + REG_MAC_SLOT_TIME);
3723 }
3724
3725 if (netif_msg_link(cp)) {
3726 if (pause & 0x01) {
3727 printk(KERN_INFO "%s: Pause is enabled "
3728 "(rxfifo: %d off: %d on: %d)\n",
3729 cp->dev->name,
3730 cp->rx_fifo_size,
3731 cp->rx_pause_off,
3732 cp->rx_pause_on);
3733 } else if (pause & 0x10) {
3734 printk(KERN_INFO "%s: TX pause enabled\n",
3735 cp->dev->name);
3736 } else {
3737 printk(KERN_INFO "%s: Pause is disabled\n",
3738 cp->dev->name);
3739 }
3740 }
3741
3742 val = readl(cp->regs + REG_MAC_CTRL_CFG);
3743 val &= ~(MAC_CTRL_CFG_SEND_PAUSE_EN | MAC_CTRL_CFG_RECV_PAUSE_EN);
3744 if (pause) { /* symmetric or asymmetric pause */
3745 val |= MAC_CTRL_CFG_SEND_PAUSE_EN;
3746 if (pause & 0x01) { /* symmetric pause */
3747 val |= MAC_CTRL_CFG_RECV_PAUSE_EN;
3748 }
3749 }
3750 writel(val, cp->regs + REG_MAC_CTRL_CFG);
3751 cas_start_dma(cp);
3752}
3753
3754/* Must be invoked under cp->lock. */
3755static void cas_init_hw(struct cas *cp, int restart_link)
3756{
3757 if (restart_link)
3758 cas_phy_init(cp);
3759
3760 cas_init_pause_thresholds(cp);
3761 cas_init_mac(cp);
3762 cas_init_dma(cp);
3763
3764 if (restart_link) {
3765 /* Default aneg parameters */
3766 cp->timer_ticks = 0;
3767 cas_begin_auto_negotiation(cp, NULL);
3768 } else if (cp->lstate == link_up) {
3769 cas_set_link_modes(cp);
3770 netif_carrier_on(cp->dev);
3771 }
3772}
3773
3774/* Must be invoked under cp->lock. on earlier cassini boards,
3775 * SOFT_0 is tied to PCI reset. we use this to force a pci reset,
3776 * let it settle out, and then restore pci state.
3777 */
3778static void cas_hard_reset(struct cas *cp)
3779{
3780 writel(BIM_LOCAL_DEV_SOFT_0, cp->regs + REG_BIM_LOCAL_DEV_EN);
3781 udelay(20);
3782 pci_restore_state(cp->pdev);
3783}
3784
3785
3786static void cas_global_reset(struct cas *cp, int blkflag)
3787{
3788 int limit;
3789
3790 /* issue a global reset. don't use RSTOUT. */
3791 if (blkflag && !CAS_PHY_MII(cp->phy_type)) {
3792 /* For PCS, when the blkflag is set, we should set the
3793 * SW_RESET_BLOCK_PCS_SLINK bit to prevent the results of
3794 * the last autonegotiation from being cleared. We'll
3795 * need some special handling if the chip is set into a
3796 * loopback mode.
3797 */
3798 writel((SW_RESET_TX | SW_RESET_RX | SW_RESET_BLOCK_PCS_SLINK),
3799 cp->regs + REG_SW_RESET);
3800 } else {
3801 writel(SW_RESET_TX | SW_RESET_RX, cp->regs + REG_SW_RESET);
3802 }
3803
3804 /* need to wait at least 3ms before polling register */
3805 mdelay(3);
3806
3807 limit = STOP_TRIES;
3808 while (limit-- > 0) {
3809 u32 val = readl(cp->regs + REG_SW_RESET);
3810 if ((val & (SW_RESET_TX | SW_RESET_RX)) == 0)
3811 goto done;
3812 udelay(10);
3813 }
3814 printk(KERN_ERR "%s: sw reset failed.\n", cp->dev->name);
3815
3816done:
3817 /* enable various BIM interrupts */
3818 writel(BIM_CFG_DPAR_INTR_ENABLE | BIM_CFG_RMA_INTR_ENABLE |
3819 BIM_CFG_RTA_INTR_ENABLE, cp->regs + REG_BIM_CFG);
3820
3821 /* clear out pci error status mask for handled errors.
3822 * we don't deal with DMA counter overflows as they happen
3823 * all the time.
3824 */
3825 writel(0xFFFFFFFFU & ~(PCI_ERR_BADACK | PCI_ERR_DTRTO |
3826 PCI_ERR_OTHER | PCI_ERR_BIM_DMA_WRITE |
3827 PCI_ERR_BIM_DMA_READ), cp->regs +
3828 REG_PCI_ERR_STATUS_MASK);
3829
3830 /* set up for MII by default to address mac rx reset timeout
3831 * issue
3832 */
3833 writel(PCS_DATAPATH_MODE_MII, cp->regs + REG_PCS_DATAPATH_MODE);
3834}
3835
3836static void cas_reset(struct cas *cp, int blkflag)
3837{
3838 u32 val;
3839
3840 cas_mask_intr(cp);
3841 cas_global_reset(cp, blkflag);
3842 cas_mac_reset(cp);
3843 cas_entropy_reset(cp);
3844
3845 /* disable dma engines. */
3846 val = readl(cp->regs + REG_TX_CFG);
3847 val &= ~TX_CFG_DMA_EN;
3848 writel(val, cp->regs + REG_TX_CFG);
3849
3850 val = readl(cp->regs + REG_RX_CFG);
3851 val &= ~RX_CFG_DMA_EN;
3852 writel(val, cp->regs + REG_RX_CFG);
3853
3854 /* program header parser */
3855 if ((cp->cas_flags & CAS_FLAG_TARGET_ABORT) ||
3856 (CAS_HP_ALT_FIRMWARE == cas_prog_null)) {
3857 cas_load_firmware(cp, CAS_HP_FIRMWARE);
3858 } else {
3859 cas_load_firmware(cp, CAS_HP_ALT_FIRMWARE);
3860 }
3861
3862 /* clear out error registers */
3863 spin_lock(&cp->stat_lock[N_TX_RINGS]);
3864 cas_clear_mac_err(cp);
3865 spin_unlock(&cp->stat_lock[N_TX_RINGS]);
3866}
3867
3868/* Shut down the chip, must be called with pm_sem held. */
3869static void cas_shutdown(struct cas *cp)
3870{
3871 unsigned long flags;
3872
3873 /* Make us not-running to avoid timers respawning */
3874 cp->hw_running = 0;
3875
3876 del_timer_sync(&cp->link_timer);
3877
3878 /* Stop the reset task */
3879#if 0
3880 while (atomic_read(&cp->reset_task_pending_mtu) ||
3881 atomic_read(&cp->reset_task_pending_spare) ||
3882 atomic_read(&cp->reset_task_pending_all))
3883 schedule();
3884
3885#else
3886 while (atomic_read(&cp->reset_task_pending))
3887 schedule();
3888#endif
3889 /* Actually stop the chip */
3890 cas_lock_all_save(cp, flags);
3891 cas_reset(cp, 0);
3892 if (cp->cas_flags & CAS_FLAG_SATURN)
3893 cas_phy_powerdown(cp);
3894 cas_unlock_all_restore(cp, flags);
3895}
3896
3897static int cas_change_mtu(struct net_device *dev, int new_mtu)
3898{
3899 struct cas *cp = netdev_priv(dev);
3900
3901 if (new_mtu < CAS_MIN_MTU || new_mtu > CAS_MAX_MTU)
3902 return -EINVAL;
3903
3904 dev->mtu = new_mtu;
3905 if (!netif_running(dev) || !netif_device_present(dev))
3906 return 0;
3907
3908 /* let the reset task handle it */
3909#if 1
3910 atomic_inc(&cp->reset_task_pending);
3911 if ((cp->phy_type & CAS_PHY_SERDES)) {
3912 atomic_inc(&cp->reset_task_pending_all);
3913 } else {
3914 atomic_inc(&cp->reset_task_pending_mtu);
3915 }
3916 schedule_work(&cp->reset_task);
3917#else
3918 atomic_set(&cp->reset_task_pending, (cp->phy_type & CAS_PHY_SERDES) ?
3919 CAS_RESET_ALL : CAS_RESET_MTU);
3920 printk(KERN_ERR "reset called in cas_change_mtu\n");
3921 schedule_work(&cp->reset_task);
3922#endif
3923
3924 flush_scheduled_work();
3925 return 0;
3926}
3927
3928static void cas_clean_txd(struct cas *cp, int ring)
3929{
3930 struct cas_tx_desc *txd = cp->init_txds[ring];
3931 struct sk_buff *skb, **skbs = cp->tx_skbs[ring];
3932 u64 daddr, dlen;
3933 int i, size;
3934
3935 size = TX_DESC_RINGN_SIZE(ring);
3936 for (i = 0; i < size; i++) {
3937 int frag;
3938
3939 if (skbs[i] == NULL)
3940 continue;
3941
3942 skb = skbs[i];
3943 skbs[i] = NULL;
3944
3945 for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
3946 int ent = i & (size - 1);
3947
3948 /* first buffer is never a tiny buffer and so
3949 * needs to be unmapped.
3950 */
3951 daddr = le64_to_cpu(txd[ent].buffer);
3952 dlen = CAS_VAL(TX_DESC_BUFLEN,
3953 le64_to_cpu(txd[ent].control));
3954 pci_unmap_page(cp->pdev, daddr, dlen,
3955 PCI_DMA_TODEVICE);
3956
3957 if (frag != skb_shinfo(skb)->nr_frags) {
3958 i++;
3959
3960 /* next buffer might be a tiny buffer.
3961 * skip past it.
3962 */
3963 ent = i & (size - 1);
3964 if (cp->tx_tiny_use[ring][ent].used)
3965 i++;
3966 }
3967 }
3968 dev_kfree_skb_any(skb);
3969 }
3970
3971 /* zero out tiny buf usage */
3972 memset(cp->tx_tiny_use[ring], 0, size*sizeof(*cp->tx_tiny_use[ring]));
3973}
3974
3975/* freed on close */
3976static inline void cas_free_rx_desc(struct cas *cp, int ring)
3977{
3978 cas_page_t **page = cp->rx_pages[ring];
3979 int i, size;
3980
3981 size = RX_DESC_RINGN_SIZE(ring);
3982 for (i = 0; i < size; i++) {
3983 if (page[i]) {
3984 cas_page_free(cp, page[i]);
3985 page[i] = NULL;
3986 }
3987 }
3988}
3989
3990static void cas_free_rxds(struct cas *cp)
3991{
3992 int i;
3993
3994 for (i = 0; i < N_RX_DESC_RINGS; i++)
3995 cas_free_rx_desc(cp, i);
3996}
3997
3998/* Must be invoked under cp->lock. */
3999static void cas_clean_rings(struct cas *cp)
4000{
4001 int i;
4002
4003 /* need to clean all tx rings */
4004 memset(cp->tx_old, 0, sizeof(*cp->tx_old)*N_TX_RINGS);
4005 memset(cp->tx_new, 0, sizeof(*cp->tx_new)*N_TX_RINGS);
4006 for (i = 0; i < N_TX_RINGS; i++)
4007 cas_clean_txd(cp, i);
4008
4009 /* zero out init block */
4010 memset(cp->init_block, 0, sizeof(struct cas_init_block));
4011 cas_clean_rxds(cp);
4012 cas_clean_rxcs(cp);
4013}
4014
4015/* allocated on open */
4016static inline int cas_alloc_rx_desc(struct cas *cp, int ring)
4017{
4018 cas_page_t **page = cp->rx_pages[ring];
4019 int size, i = 0;
4020
4021 size = RX_DESC_RINGN_SIZE(ring);
4022 for (i = 0; i < size; i++) {
4023 if ((page[i] = cas_page_alloc(cp, GFP_KERNEL)) == NULL)
4024 return -1;
4025 }
4026 return 0;
4027}
4028
4029static int cas_alloc_rxds(struct cas *cp)
4030{
4031 int i;
4032
4033 for (i = 0; i < N_RX_DESC_RINGS; i++) {
4034 if (cas_alloc_rx_desc(cp, i) < 0) {
4035 cas_free_rxds(cp);
4036 return -1;
4037 }
4038 }
4039 return 0;
4040}
4041
4042static void cas_reset_task(void *data)
4043{
4044 struct cas *cp = (struct cas *) data;
4045#if 0
4046 int pending = atomic_read(&cp->reset_task_pending);
4047#else
4048 int pending_all = atomic_read(&cp->reset_task_pending_all);
4049 int pending_spare = atomic_read(&cp->reset_task_pending_spare);
4050 int pending_mtu = atomic_read(&cp->reset_task_pending_mtu);
4051
4052 if (pending_all == 0 && pending_spare == 0 && pending_mtu == 0) {
4053 /* We can have more tasks scheduled than actually
4054 * needed.
4055 */
4056 atomic_dec(&cp->reset_task_pending);
4057 return;
4058 }
4059#endif
4060 /* The link went down, we reset the ring, but keep
4061 * DMA stopped. Use this function for reset
4062 * on error as well.
4063 */
4064 if (cp->hw_running) {
4065 unsigned long flags;
4066
4067 /* Make sure we don't get interrupts or tx packets */
4068 netif_device_detach(cp->dev);
4069 cas_lock_all_save(cp, flags);
4070
4071 if (cp->opened) {
4072 /* We call cas_spare_recover when we call cas_open,
4073 * but we do not initialize the lists cas_spare_recover
4074 * uses until cas_open is called.
4075 */
4076 cas_spare_recover(cp, GFP_ATOMIC);
4077 }
4078#if 1
4079 /* test => only pending_spare set */
4080 if (!pending_all && !pending_mtu)
4081 goto done;
4082#else
4083 if (pending == CAS_RESET_SPARE)
4084 goto done;
4085#endif
4086 /* when pending == CAS_RESET_ALL, the following
4087 * call to cas_init_hw will restart auto negotiation.
4088 * Setting the second argument of cas_reset to
4089 * !(pending == CAS_RESET_ALL) will set this argument
4090 * to 1 (avoiding reinitializing the PHY for the normal
4091 * PCS case) when auto negotiation is not restarted.
4092 */
4093#if 1
4094 cas_reset(cp, !(pending_all > 0));
4095 if (cp->opened)
4096 cas_clean_rings(cp);
4097 cas_init_hw(cp, (pending_all > 0));
4098#else
4099 cas_reset(cp, !(pending == CAS_RESET_ALL));
4100 if (cp->opened)
4101 cas_clean_rings(cp);
4102 cas_init_hw(cp, pending == CAS_RESET_ALL);
4103#endif
4104
4105done:
4106 cas_unlock_all_restore(cp, flags);
4107 netif_device_attach(cp->dev);
4108 }
4109#if 1
4110 atomic_sub(pending_all, &cp->reset_task_pending_all);
4111 atomic_sub(pending_spare, &cp->reset_task_pending_spare);
4112 atomic_sub(pending_mtu, &cp->reset_task_pending_mtu);
4113 atomic_dec(&cp->reset_task_pending);
4114#else
4115 atomic_set(&cp->reset_task_pending, 0);
4116#endif
4117}
4118
4119static void cas_link_timer(unsigned long data)
4120{
4121 struct cas *cp = (struct cas *) data;
4122 int mask, pending = 0, reset = 0;
4123 unsigned long flags;
4124
4125 if (link_transition_timeout != 0 &&
4126 cp->link_transition_jiffies_valid &&
4127 ((jiffies - cp->link_transition_jiffies) >
4128 (link_transition_timeout))) {
4129 /* One-second counter so link-down workaround doesn't
4130 * cause resets to occur so fast as to fool the switch
4131 * into thinking the link is down.
4132 */
4133 cp->link_transition_jiffies_valid = 0;
4134 }
4135
4136 if (!cp->hw_running)
4137 return;
4138
4139 spin_lock_irqsave(&cp->lock, flags);
4140 cas_lock_tx(cp);
4141 cas_entropy_gather(cp);
4142
4143 /* If the link task is still pending, we just
4144 * reschedule the link timer
4145 */
4146#if 1
4147 if (atomic_read(&cp->reset_task_pending_all) ||
4148 atomic_read(&cp->reset_task_pending_spare) ||
4149 atomic_read(&cp->reset_task_pending_mtu))
4150 goto done;
4151#else
4152 if (atomic_read(&cp->reset_task_pending))
4153 goto done;
4154#endif
4155
4156 /* check for rx cleaning */
4157 if ((mask = (cp->cas_flags & CAS_FLAG_RXD_POST_MASK))) {
4158 int i, rmask;
4159
4160 for (i = 0; i < MAX_RX_DESC_RINGS; i++) {
4161 rmask = CAS_FLAG_RXD_POST(i);
4162 if ((mask & rmask) == 0)
4163 continue;
4164
4165 /* post_rxds will do a mod_timer */
4166 if (cas_post_rxds_ringN(cp, i, cp->rx_last[i]) < 0) {
4167 pending = 1;
4168 continue;
4169 }
4170 cp->cas_flags &= ~rmask;
4171 }
4172 }
4173
4174 if (CAS_PHY_MII(cp->phy_type)) {
4175 u16 bmsr;
4176 cas_mif_poll(cp, 0);
4177 bmsr = cas_phy_read(cp, MII_BMSR);
4178 /* WTZ: Solaris driver reads this twice, but that
4179 * may be due to the PCS case and the use of a
4180 * common implementation. Read it twice here to be
4181 * safe.
4182 */
4183 bmsr = cas_phy_read(cp, MII_BMSR);
4184 cas_mif_poll(cp, 1);
4185 readl(cp->regs + REG_MIF_STATUS); /* avoid dups */
4186 reset = cas_mii_link_check(cp, bmsr);
4187 } else {
4188 reset = cas_pcs_link_check(cp);
4189 }
4190
4191 if (reset)
4192 goto done;
4193
4194 /* check for tx state machine confusion */
4195 if ((readl(cp->regs + REG_MAC_TX_STATUS) & MAC_TX_FRAME_XMIT) == 0) {
4196 u32 val = readl(cp->regs + REG_MAC_STATE_MACHINE);
4197 u32 wptr, rptr;
4198 int tlm = CAS_VAL(MAC_SM_TLM, val);
4199
4200 if (((tlm == 0x5) || (tlm == 0x3)) &&
4201 (CAS_VAL(MAC_SM_ENCAP_SM, val) == 0)) {
4202 if (netif_msg_tx_err(cp))
4203 printk(KERN_DEBUG "%s: tx err: "
4204 "MAC_STATE[%08x]\n",
4205 cp->dev->name, val);
4206 reset = 1;
4207 goto done;
4208 }
4209
4210 val = readl(cp->regs + REG_TX_FIFO_PKT_CNT);
4211 wptr = readl(cp->regs + REG_TX_FIFO_WRITE_PTR);
4212 rptr = readl(cp->regs + REG_TX_FIFO_READ_PTR);
4213 if ((val == 0) && (wptr != rptr)) {
4214 if (netif_msg_tx_err(cp))
4215 printk(KERN_DEBUG "%s: tx err: "
4216 "TX_FIFO[%08x:%08x:%08x]\n",
4217 cp->dev->name, val, wptr, rptr);
4218 reset = 1;
4219 }
4220
4221 if (reset)
4222 cas_hard_reset(cp);
4223 }
4224
4225done:
4226 if (reset) {
4227#if 1
4228 atomic_inc(&cp->reset_task_pending);
4229 atomic_inc(&cp->reset_task_pending_all);
4230 schedule_work(&cp->reset_task);
4231#else
4232 atomic_set(&cp->reset_task_pending, CAS_RESET_ALL);
4233 printk(KERN_ERR "reset called in cas_link_timer\n");
4234 schedule_work(&cp->reset_task);
4235#endif
4236 }
4237
4238 if (!pending)
4239 mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT);
4240 cas_unlock_tx(cp);
4241 spin_unlock_irqrestore(&cp->lock, flags);
4242}
4243
4244/* tiny buffers are used to avoid target abort issues with
4245 * older cassini chips.
4246 */
4247static void cas_tx_tiny_free(struct cas *cp)
4248{
4249 struct pci_dev *pdev = cp->pdev;
4250 int i;
4251
4252 for (i = 0; i < N_TX_RINGS; i++) {
4253 if (!cp->tx_tiny_bufs[i])
4254 continue;
4255
4256 pci_free_consistent(pdev, TX_TINY_BUF_BLOCK,
4257 cp->tx_tiny_bufs[i],
4258 cp->tx_tiny_dvma[i]);
4259 cp->tx_tiny_bufs[i] = NULL;
4260 }
4261}
4262
4263static int cas_tx_tiny_alloc(struct cas *cp)
4264{
4265 struct pci_dev *pdev = cp->pdev;
4266 int i;
4267
4268 for (i = 0; i < N_TX_RINGS; i++) {
4269 cp->tx_tiny_bufs[i] =
4270 pci_alloc_consistent(pdev, TX_TINY_BUF_BLOCK,
4271 &cp->tx_tiny_dvma[i]);
4272 if (!cp->tx_tiny_bufs[i]) {
4273 cas_tx_tiny_free(cp);
4274 return -1;
4275 }
4276 }
4277 return 0;
4278}
4279
4280
4281static int cas_open(struct net_device *dev)
4282{
4283 struct cas *cp = netdev_priv(dev);
4284 int hw_was_up, err;
4285 unsigned long flags;
4286
4287 down(&cp->pm_sem);
4288
4289 hw_was_up = cp->hw_running;
4290
4291 /* The power-management semaphore protects the hw_running
4292 * etc. state so it is safe to do this bit without cp->lock
4293 */
4294 if (!cp->hw_running) {
4295 /* Reset the chip */
4296 cas_lock_all_save(cp, flags);
4297 /* We set the second arg to cas_reset to zero
4298 * because cas_init_hw below will have its second
4299 * argument set to non-zero, which will force
4300 * autonegotiation to start.
4301 */
4302 cas_reset(cp, 0);
4303 cp->hw_running = 1;
4304 cas_unlock_all_restore(cp, flags);
4305 }
4306
4307 if (cas_tx_tiny_alloc(cp) < 0)
4308 return -ENOMEM;
4309
4310 /* alloc rx descriptors */
4311 err = -ENOMEM;
4312 if (cas_alloc_rxds(cp) < 0)
4313 goto err_tx_tiny;
4314
4315 /* allocate spares */
4316 cas_spare_init(cp);
4317 cas_spare_recover(cp, GFP_KERNEL);
4318
4319 /* We can now request the interrupt as we know it's masked
4320 * on the controller. cassini+ has up to 4 interrupts
4321 * that can be used, but you need to do explicit pci interrupt
4322 * mapping to expose them
4323 */
4324 if (request_irq(cp->pdev->irq, cas_interrupt,
4325 SA_SHIRQ, dev->name, (void *) dev)) {
4326 printk(KERN_ERR "%s: failed to request irq !\n",
4327 cp->dev->name);
4328 err = -EAGAIN;
4329 goto err_spare;
4330 }
4331
4332 /* init hw */
4333 cas_lock_all_save(cp, flags);
4334 cas_clean_rings(cp);
4335 cas_init_hw(cp, !hw_was_up);
4336 cp->opened = 1;
4337 cas_unlock_all_restore(cp, flags);
4338
4339 netif_start_queue(dev);
4340 up(&cp->pm_sem);
4341 return 0;
4342
4343err_spare:
4344 cas_spare_free(cp);
4345 cas_free_rxds(cp);
4346err_tx_tiny:
4347 cas_tx_tiny_free(cp);
4348 up(&cp->pm_sem);
4349 return err;
4350}
4351
4352static int cas_close(struct net_device *dev)
4353{
4354 unsigned long flags;
4355 struct cas *cp = netdev_priv(dev);
4356
4357 /* Make sure we don't get distracted by suspend/resume */
4358 down(&cp->pm_sem);
4359
4360 netif_stop_queue(dev);
4361
4362 /* Stop traffic, mark us closed */
4363 cas_lock_all_save(cp, flags);
4364 cp->opened = 0;
4365 cas_reset(cp, 0);
4366 cas_phy_init(cp);
4367 cas_begin_auto_negotiation(cp, NULL);
4368 cas_clean_rings(cp);
4369 cas_unlock_all_restore(cp, flags);
4370
4371 free_irq(cp->pdev->irq, (void *) dev);
4372 cas_spare_free(cp);
4373 cas_free_rxds(cp);
4374 cas_tx_tiny_free(cp);
4375 up(&cp->pm_sem);
4376 return 0;
4377}
4378
4379static struct {
4380 const char name[ETH_GSTRING_LEN];
4381} ethtool_cassini_statnames[] = {
4382 {"collisions"},
4383 {"rx_bytes"},
4384 {"rx_crc_errors"},
4385 {"rx_dropped"},
4386 {"rx_errors"},
4387 {"rx_fifo_errors"},
4388 {"rx_frame_errors"},
4389 {"rx_length_errors"},
4390 {"rx_over_errors"},
4391 {"rx_packets"},
4392 {"tx_aborted_errors"},
4393 {"tx_bytes"},
4394 {"tx_dropped"},
4395 {"tx_errors"},
4396 {"tx_fifo_errors"},
4397 {"tx_packets"}
4398};
4399#define CAS_NUM_STAT_KEYS (sizeof(ethtool_cassini_statnames)/ETH_GSTRING_LEN)
4400
4401static struct {
4402 const int offsets; /* neg. values for 2nd arg to cas_phy_read */
4403} ethtool_register_table[] = {
4404 {-MII_BMSR},
4405 {-MII_BMCR},
4406 {REG_CAWR},
4407 {REG_INF_BURST},
4408 {REG_BIM_CFG},
4409 {REG_RX_CFG},
4410 {REG_HP_CFG},
4411 {REG_MAC_TX_CFG},
4412 {REG_MAC_RX_CFG},
4413 {REG_MAC_CTRL_CFG},
4414 {REG_MAC_XIF_CFG},
4415 {REG_MIF_CFG},
4416 {REG_PCS_CFG},
4417 {REG_SATURN_PCFG},
4418 {REG_PCS_MII_STATUS},
4419 {REG_PCS_STATE_MACHINE},
4420 {REG_MAC_COLL_EXCESS},
4421 {REG_MAC_COLL_LATE}
4422};
4423#define CAS_REG_LEN (sizeof(ethtool_register_table)/sizeof(int))
4424#define CAS_MAX_REGS (sizeof (u32)*CAS_REG_LEN)
4425
4426static void cas_read_regs(struct cas *cp, u8 *ptr, int len)
4427{
4428 u8 *p;
4429 int i;
4430 unsigned long flags;
4431
4432 spin_lock_irqsave(&cp->lock, flags);
4433 for (i = 0, p = ptr; i < len; i++, p += sizeof(u32)) {
4434 u16 hval;
4435 u32 val;
4436 if (ethtool_register_table[i].offsets < 0) {
4437 hval = cas_phy_read(cp,
4438 -ethtool_register_table[i].offsets);
4439 val = hval;
4440 } else {
4441 val = readl(cp->regs + ethtool_register_table[i].offsets);
4442 }
4443 memcpy(p, (u8 *)&val, sizeof(u32));
4444 }
4445 spin_unlock_irqrestore(&cp->lock, flags);
4446}
4447
4448static struct net_device_stats *cas_get_stats(struct net_device *dev)
4449{
4450 struct cas *cp = netdev_priv(dev);
4451 struct net_device_stats *stats = cp->net_stats;
4452 unsigned long flags;
4453 int i;
4454 unsigned long tmp;
4455
4456 /* we collate all of the stats into net_stats[N_TX_RINGS] */
4457 if (!cp->hw_running)
4458 return stats + N_TX_RINGS;
4459
4460 /* collect outstanding stats */
4461 /* WTZ: the Cassini spec gives these as 16 bit counters but
4462 * stored in 32-bit words. Added a mask of 0xffff to be safe,
4463 * in case the chip somehow puts any garbage in the other bits.
4464 * Also, counter usage didn't seem to match what Adrian did
4465 * in the parts of the code that set these quantities. Made
4466 * that consistent.
4467 */
4468 spin_lock_irqsave(&cp->stat_lock[N_TX_RINGS], flags);
4469 stats[N_TX_RINGS].rx_crc_errors +=
4470 readl(cp->regs + REG_MAC_FCS_ERR) & 0xffff;
4471 stats[N_TX_RINGS].rx_frame_errors +=
4472 readl(cp->regs + REG_MAC_ALIGN_ERR) &0xffff;
4473 stats[N_TX_RINGS].rx_length_errors +=
4474 readl(cp->regs + REG_MAC_LEN_ERR) & 0xffff;
4475#if 1
4476 tmp = (readl(cp->regs + REG_MAC_COLL_EXCESS) & 0xffff) +
4477 (readl(cp->regs + REG_MAC_COLL_LATE) & 0xffff);
4478 stats[N_TX_RINGS].tx_aborted_errors += tmp;
4479 stats[N_TX_RINGS].collisions +=
4480 tmp + (readl(cp->regs + REG_MAC_COLL_NORMAL) & 0xffff);
4481#else
4482 stats[N_TX_RINGS].tx_aborted_errors +=
4483 readl(cp->regs + REG_MAC_COLL_EXCESS);
4484 stats[N_TX_RINGS].collisions += readl(cp->regs + REG_MAC_COLL_EXCESS) +
4485 readl(cp->regs + REG_MAC_COLL_LATE);
4486#endif
4487 cas_clear_mac_err(cp);
4488
4489 /* saved bits that are unique to ring 0 */
4490 spin_lock(&cp->stat_lock[0]);
4491 stats[N_TX_RINGS].collisions += stats[0].collisions;
4492 stats[N_TX_RINGS].rx_over_errors += stats[0].rx_over_errors;
4493 stats[N_TX_RINGS].rx_frame_errors += stats[0].rx_frame_errors;
4494 stats[N_TX_RINGS].rx_fifo_errors += stats[0].rx_fifo_errors;
4495 stats[N_TX_RINGS].tx_aborted_errors += stats[0].tx_aborted_errors;
4496 stats[N_TX_RINGS].tx_fifo_errors += stats[0].tx_fifo_errors;
4497 spin_unlock(&cp->stat_lock[0]);
4498
4499 for (i = 0; i < N_TX_RINGS; i++) {
4500 spin_lock(&cp->stat_lock[i]);
4501 stats[N_TX_RINGS].rx_length_errors +=
4502 stats[i].rx_length_errors;
4503 stats[N_TX_RINGS].rx_crc_errors += stats[i].rx_crc_errors;
4504 stats[N_TX_RINGS].rx_packets += stats[i].rx_packets;
4505 stats[N_TX_RINGS].tx_packets += stats[i].tx_packets;
4506 stats[N_TX_RINGS].rx_bytes += stats[i].rx_bytes;
4507 stats[N_TX_RINGS].tx_bytes += stats[i].tx_bytes;
4508 stats[N_TX_RINGS].rx_errors += stats[i].rx_errors;
4509 stats[N_TX_RINGS].tx_errors += stats[i].tx_errors;
4510 stats[N_TX_RINGS].rx_dropped += stats[i].rx_dropped;
4511 stats[N_TX_RINGS].tx_dropped += stats[i].tx_dropped;
4512 memset(stats + i, 0, sizeof(struct net_device_stats));
4513 spin_unlock(&cp->stat_lock[i]);
4514 }
4515 spin_unlock_irqrestore(&cp->stat_lock[N_TX_RINGS], flags);
4516 return stats + N_TX_RINGS;
4517}
4518
4519
4520static void cas_set_multicast(struct net_device *dev)
4521{
4522 struct cas *cp = netdev_priv(dev);
4523 u32 rxcfg, rxcfg_new;
4524 unsigned long flags;
4525 int limit = STOP_TRIES;
4526
4527 if (!cp->hw_running)
4528 return;
4529
4530 spin_lock_irqsave(&cp->lock, flags);
4531 rxcfg = readl(cp->regs + REG_MAC_RX_CFG);
4532
4533 /* disable RX MAC and wait for completion */
4534 writel(rxcfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
4535 while (readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_EN) {
4536 if (!limit--)
4537 break;
4538 udelay(10);
4539 }
4540
4541 /* disable hash filter and wait for completion */
4542 limit = STOP_TRIES;
4543 rxcfg &= ~(MAC_RX_CFG_PROMISC_EN | MAC_RX_CFG_HASH_FILTER_EN);
4544 writel(rxcfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
4545 while (readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_HASH_FILTER_EN) {
4546 if (!limit--)
4547 break;
4548 udelay(10);
4549 }
4550
4551 /* program hash filters */
4552 cp->mac_rx_cfg = rxcfg_new = cas_setup_multicast(cp);
4553 rxcfg |= rxcfg_new;
4554 writel(rxcfg, cp->regs + REG_MAC_RX_CFG);
4555 spin_unlock_irqrestore(&cp->lock, flags);
4556}
4557
4558static void cas_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
4559{
4560 struct cas *cp = netdev_priv(dev);
4561 strncpy(info->driver, DRV_MODULE_NAME, ETHTOOL_BUSINFO_LEN);
4562 strncpy(info->version, DRV_MODULE_VERSION, ETHTOOL_BUSINFO_LEN);
4563 info->fw_version[0] = '\0';
4564 strncpy(info->bus_info, pci_name(cp->pdev), ETHTOOL_BUSINFO_LEN);
4565 info->regdump_len = cp->casreg_len < CAS_MAX_REGS ?
4566 cp->casreg_len : CAS_MAX_REGS;
4567 info->n_stats = CAS_NUM_STAT_KEYS;
4568}
4569
4570static int cas_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
4571{
4572 struct cas *cp = netdev_priv(dev);
4573 u16 bmcr;
4574 int full_duplex, speed, pause;
4575 unsigned long flags;
4576 enum link_state linkstate = link_up;
4577
4578 cmd->advertising = 0;
4579 cmd->supported = SUPPORTED_Autoneg;
4580 if (cp->cas_flags & CAS_FLAG_1000MB_CAP) {
4581 cmd->supported |= SUPPORTED_1000baseT_Full;
4582 cmd->advertising |= ADVERTISED_1000baseT_Full;
4583 }
4584
4585 /* Record PHY settings if HW is on. */
4586 spin_lock_irqsave(&cp->lock, flags);
4587 bmcr = 0;
4588 linkstate = cp->lstate;
4589 if (CAS_PHY_MII(cp->phy_type)) {
4590 cmd->port = PORT_MII;
4591 cmd->transceiver = (cp->cas_flags & CAS_FLAG_SATURN) ?
4592 XCVR_INTERNAL : XCVR_EXTERNAL;
4593 cmd->phy_address = cp->phy_addr;
4594 cmd->advertising |= ADVERTISED_TP | ADVERTISED_MII |
4595 ADVERTISED_10baseT_Half |
4596 ADVERTISED_10baseT_Full |
4597 ADVERTISED_100baseT_Half |
4598 ADVERTISED_100baseT_Full;
4599
4600 cmd->supported |=
4601 (SUPPORTED_10baseT_Half |
4602 SUPPORTED_10baseT_Full |
4603 SUPPORTED_100baseT_Half |
4604 SUPPORTED_100baseT_Full |
4605 SUPPORTED_TP | SUPPORTED_MII);
4606
4607 if (cp->hw_running) {
4608 cas_mif_poll(cp, 0);
4609 bmcr = cas_phy_read(cp, MII_BMCR);
4610 cas_read_mii_link_mode(cp, &full_duplex,
4611 &speed, &pause);
4612 cas_mif_poll(cp, 1);
4613 }
4614
4615 } else {
4616 cmd->port = PORT_FIBRE;
4617 cmd->transceiver = XCVR_INTERNAL;
4618 cmd->phy_address = 0;
4619 cmd->supported |= SUPPORTED_FIBRE;
4620 cmd->advertising |= ADVERTISED_FIBRE;
4621
4622 if (cp->hw_running) {
4623 /* pcs uses the same bits as mii */
4624 bmcr = readl(cp->regs + REG_PCS_MII_CTRL);
4625 cas_read_pcs_link_mode(cp, &full_duplex,
4626 &speed, &pause);
4627 }
4628 }
4629 spin_unlock_irqrestore(&cp->lock, flags);
4630
4631 if (bmcr & BMCR_ANENABLE) {
4632 cmd->advertising |= ADVERTISED_Autoneg;
4633 cmd->autoneg = AUTONEG_ENABLE;
4634 cmd->speed = ((speed == 10) ?
4635 SPEED_10 :
4636 ((speed == 1000) ?
4637 SPEED_1000 : SPEED_100));
4638 cmd->duplex = full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
4639 } else {
4640 cmd->autoneg = AUTONEG_DISABLE;
4641 cmd->speed =
4642 (bmcr & CAS_BMCR_SPEED1000) ?
4643 SPEED_1000 :
4644 ((bmcr & BMCR_SPEED100) ? SPEED_100:
4645 SPEED_10);
4646 cmd->duplex =
4647 (bmcr & BMCR_FULLDPLX) ?
4648 DUPLEX_FULL : DUPLEX_HALF;
4649 }
4650 if (linkstate != link_up) {
4651 /* Force these to "unknown" if the link is not up and
4652 * autonegotiation is enabled. We can set the link
4653 * speed to 0, but not cmd->duplex,
4654 * because its legal values are 0 and 1. Ethtool will
4655 * print the value reported in parentheses after the
4656 * word "Unknown" for unrecognized values.
4657 *
4658 * If in forced mode, we report the speed and duplex
4659 * settings that we configured.
4660 */
4661 if (cp->link_cntl & BMCR_ANENABLE) {
4662 cmd->speed = 0;
4663 cmd->duplex = 0xff;
4664 } else {
4665 cmd->speed = SPEED_10;
4666 if (cp->link_cntl & BMCR_SPEED100) {
4667 cmd->speed = SPEED_100;
4668 } else if (cp->link_cntl & CAS_BMCR_SPEED1000) {
4669 cmd->speed = SPEED_1000;
4670 }
4671 cmd->duplex = (cp->link_cntl & BMCR_FULLDPLX)?
4672 DUPLEX_FULL : DUPLEX_HALF;
4673 }
4674 }
4675 return 0;
4676}
4677
4678static int cas_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
4679{
4680 struct cas *cp = netdev_priv(dev);
4681 unsigned long flags;
4682
4683 /* Verify the settings we care about. */
4684 if (cmd->autoneg != AUTONEG_ENABLE &&
4685 cmd->autoneg != AUTONEG_DISABLE)
4686 return -EINVAL;
4687
4688 if (cmd->autoneg == AUTONEG_DISABLE &&
4689 ((cmd->speed != SPEED_1000 &&
4690 cmd->speed != SPEED_100 &&
4691 cmd->speed != SPEED_10) ||
4692 (cmd->duplex != DUPLEX_HALF &&
4693 cmd->duplex != DUPLEX_FULL)))
4694 return -EINVAL;
4695
4696 /* Apply settings and restart link process. */
4697 spin_lock_irqsave(&cp->lock, flags);
4698 cas_begin_auto_negotiation(cp, cmd);
4699 spin_unlock_irqrestore(&cp->lock, flags);
4700 return 0;
4701}
4702
4703static int cas_nway_reset(struct net_device *dev)
4704{
4705 struct cas *cp = netdev_priv(dev);
4706 unsigned long flags;
4707
4708 if ((cp->link_cntl & BMCR_ANENABLE) == 0)
4709 return -EINVAL;
4710
4711 /* Restart link process. */
4712 spin_lock_irqsave(&cp->lock, flags);
4713 cas_begin_auto_negotiation(cp, NULL);
4714 spin_unlock_irqrestore(&cp->lock, flags);
4715
4716 return 0;
4717}
4718
4719static u32 cas_get_link(struct net_device *dev)
4720{
4721 struct cas *cp = netdev_priv(dev);
4722 return cp->lstate == link_up;
4723}
4724
4725static u32 cas_get_msglevel(struct net_device *dev)
4726{
4727 struct cas *cp = netdev_priv(dev);
4728 return cp->msg_enable;
4729}
4730
4731static void cas_set_msglevel(struct net_device *dev, u32 value)
4732{
4733 struct cas *cp = netdev_priv(dev);
4734 cp->msg_enable = value;
4735}
4736
4737static int cas_get_regs_len(struct net_device *dev)
4738{
4739 struct cas *cp = netdev_priv(dev);
4740 return cp->casreg_len < CAS_MAX_REGS ? cp->casreg_len: CAS_MAX_REGS;
4741}
4742
4743static void cas_get_regs(struct net_device *dev, struct ethtool_regs *regs,
4744 void *p)
4745{
4746 struct cas *cp = netdev_priv(dev);
4747 regs->version = 0;
4748 /* cas_read_regs handles locks (cp->lock). */
4749 cas_read_regs(cp, p, regs->len / sizeof(u32));
4750}
4751
4752static int cas_get_stats_count(struct net_device *dev)
4753{
4754 return CAS_NUM_STAT_KEYS;
4755}
4756
4757static void cas_get_strings(struct net_device *dev, u32 stringset, u8 *data)
4758{
4759 memcpy(data, &ethtool_cassini_statnames,
4760 CAS_NUM_STAT_KEYS * ETH_GSTRING_LEN);
4761}
4762
4763static void cas_get_ethtool_stats(struct net_device *dev,
4764 struct ethtool_stats *estats, u64 *data)
4765{
4766 struct cas *cp = netdev_priv(dev);
4767 struct net_device_stats *stats = cas_get_stats(cp->dev);
4768 int i = 0;
4769 data[i++] = stats->collisions;
4770 data[i++] = stats->rx_bytes;
4771 data[i++] = stats->rx_crc_errors;
4772 data[i++] = stats->rx_dropped;
4773 data[i++] = stats->rx_errors;
4774 data[i++] = stats->rx_fifo_errors;
4775 data[i++] = stats->rx_frame_errors;
4776 data[i++] = stats->rx_length_errors;
4777 data[i++] = stats->rx_over_errors;
4778 data[i++] = stats->rx_packets;
4779 data[i++] = stats->tx_aborted_errors;
4780 data[i++] = stats->tx_bytes;
4781 data[i++] = stats->tx_dropped;
4782 data[i++] = stats->tx_errors;
4783 data[i++] = stats->tx_fifo_errors;
4784 data[i++] = stats->tx_packets;
4785 BUG_ON(i != CAS_NUM_STAT_KEYS);
4786}
4787
4788static struct ethtool_ops cas_ethtool_ops = {
4789 .get_drvinfo = cas_get_drvinfo,
4790 .get_settings = cas_get_settings,
4791 .set_settings = cas_set_settings,
4792 .nway_reset = cas_nway_reset,
4793 .get_link = cas_get_link,
4794 .get_msglevel = cas_get_msglevel,
4795 .set_msglevel = cas_set_msglevel,
4796 .get_regs_len = cas_get_regs_len,
4797 .get_regs = cas_get_regs,
4798 .get_stats_count = cas_get_stats_count,
4799 .get_strings = cas_get_strings,
4800 .get_ethtool_stats = cas_get_ethtool_stats,
4801};
4802
4803static int cas_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
4804{
4805 struct cas *cp = netdev_priv(dev);
4806 struct mii_ioctl_data *data = if_mii(ifr);
4807 unsigned long flags;
4808 int rc = -EOPNOTSUPP;
4809
4810 /* Hold the PM semaphore while doing ioctls or we may collide
4811 * with open/close and power management and oops.
4812 */
4813 down(&cp->pm_sem);
4814 switch (cmd) {
4815 case SIOCGMIIPHY: /* Get address of MII PHY in use. */
4816 data->phy_id = cp->phy_addr;
4817 /* Fallthrough... */
4818
4819 case SIOCGMIIREG: /* Read MII PHY register. */
4820 spin_lock_irqsave(&cp->lock, flags);
4821 cas_mif_poll(cp, 0);
4822 data->val_out = cas_phy_read(cp, data->reg_num & 0x1f);
4823 cas_mif_poll(cp, 1);
4824 spin_unlock_irqrestore(&cp->lock, flags);
4825 rc = 0;
4826 break;
4827
4828 case SIOCSMIIREG: /* Write MII PHY register. */
4829 if (!capable(CAP_NET_ADMIN)) {
4830 rc = -EPERM;
4831 break;
4832 }
4833 spin_lock_irqsave(&cp->lock, flags);
4834 cas_mif_poll(cp, 0);
4835 rc = cas_phy_write(cp, data->reg_num & 0x1f, data->val_in);
4836 cas_mif_poll(cp, 1);
4837 spin_unlock_irqrestore(&cp->lock, flags);
4838 break;
4839 default:
4840 break;
4841 }
4842
4843 up(&cp->pm_sem);
4844 return rc;
4845}
4846
4847static int __devinit cas_init_one(struct pci_dev *pdev,
4848 const struct pci_device_id *ent)
4849{
4850 static int cas_version_printed = 0;
4851 unsigned long casreg_base, casreg_len;
4852 struct net_device *dev;
4853 struct cas *cp;
4854 int i, err, pci_using_dac;
4855 u16 pci_cmd;
4856 u8 orig_cacheline_size = 0, cas_cacheline_size = 0;
4857
4858 if (cas_version_printed++ == 0)
4859 printk(KERN_INFO "%s", version);
4860
4861 err = pci_enable_device(pdev);
4862 if (err) {
4863 printk(KERN_ERR PFX "Cannot enable PCI device, "
4864 "aborting.\n");
4865 return err;
4866 }
4867
4868 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
4869 printk(KERN_ERR PFX "Cannot find proper PCI device "
4870 "base address, aborting.\n");
4871 err = -ENODEV;
4872 goto err_out_disable_pdev;
4873 }
4874
4875 dev = alloc_etherdev(sizeof(*cp));
4876 if (!dev) {
4877 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
4878 err = -ENOMEM;
4879 goto err_out_disable_pdev;
4880 }
4881 SET_MODULE_OWNER(dev);
4882 SET_NETDEV_DEV(dev, &pdev->dev);
4883
4884 err = pci_request_regions(pdev, dev->name);
4885 if (err) {
4886 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
4887 "aborting.\n");
4888 goto err_out_free_netdev;
4889 }
4890 pci_set_master(pdev);
4891
4892 /* we must always turn on parity response or else parity
4893 * doesn't get generated properly. disable SERR/PERR as well.
4894 * in addition, we want to turn MWI on.
4895 */
4896 pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
4897 pci_cmd &= ~PCI_COMMAND_SERR;
4898 pci_cmd |= PCI_COMMAND_PARITY;
4899 pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
4900 pci_set_mwi(pdev);
4901 /*
4902 * On some architectures, the default cache line size set
4903 * by pci_set_mwi reduces performance. We have to increase
4904 * it for this case. To start, we'll print some configuration
4905 * data.
4906 */
4907#if 1
4908 pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE,
4909 &orig_cacheline_size);
4910 if (orig_cacheline_size < CAS_PREF_CACHELINE_SIZE) {
4911 cas_cacheline_size =
4912 (CAS_PREF_CACHELINE_SIZE < SMP_CACHE_BYTES) ?
4913 CAS_PREF_CACHELINE_SIZE : SMP_CACHE_BYTES;
4914 if (pci_write_config_byte(pdev,
4915 PCI_CACHE_LINE_SIZE,
4916 cas_cacheline_size)) {
4917 printk(KERN_ERR PFX "Could not set PCI cache "
4918 "line size\n");
4919 goto err_write_cacheline;
4920 }
4921 }
4922#endif
4923
4924
4925 /* Configure DMA attributes. */
4926 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
4927 pci_using_dac = 1;
4928 err = pci_set_consistent_dma_mask(pdev,
4929 DMA_64BIT_MASK);
4930 if (err < 0) {
4931 printk(KERN_ERR PFX "Unable to obtain 64-bit DMA "
4932 "for consistent allocations\n");
4933 goto err_out_free_res;
4934 }
4935
4936 } else {
4937 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
4938 if (err) {
4939 printk(KERN_ERR PFX "No usable DMA configuration, "
4940 "aborting.\n");
4941 goto err_out_free_res;
4942 }
4943 pci_using_dac = 0;
4944 }
4945
4946 casreg_base = pci_resource_start(pdev, 0);
4947 casreg_len = pci_resource_len(pdev, 0);
4948
4949 cp = netdev_priv(dev);
4950 cp->pdev = pdev;
4951#if 1
4952 /* A value of 0 indicates we never explicitly set it */
4953 cp->orig_cacheline_size = cas_cacheline_size ? orig_cacheline_size: 0;
4954#endif
4955 cp->dev = dev;
4956 cp->msg_enable = (cassini_debug < 0) ? CAS_DEF_MSG_ENABLE :
4957 cassini_debug;
4958
4959 cp->link_transition = LINK_TRANSITION_UNKNOWN;
4960 cp->link_transition_jiffies_valid = 0;
4961
4962 spin_lock_init(&cp->lock);
4963 spin_lock_init(&cp->rx_inuse_lock);
4964 spin_lock_init(&cp->rx_spare_lock);
4965 for (i = 0; i < N_TX_RINGS; i++) {
4966 spin_lock_init(&cp->stat_lock[i]);
4967 spin_lock_init(&cp->tx_lock[i]);
4968 }
4969 spin_lock_init(&cp->stat_lock[N_TX_RINGS]);
4970 init_MUTEX(&cp->pm_sem);
4971
4972 init_timer(&cp->link_timer);
4973 cp->link_timer.function = cas_link_timer;
4974 cp->link_timer.data = (unsigned long) cp;
4975
4976#if 1
4977 /* Just in case the implementation of atomic operations
4978 * changes so that an explicit initialization is necessary.
4979 */
4980 atomic_set(&cp->reset_task_pending, 0);
4981 atomic_set(&cp->reset_task_pending_all, 0);
4982 atomic_set(&cp->reset_task_pending_spare, 0);
4983 atomic_set(&cp->reset_task_pending_mtu, 0);
4984#endif
4985 INIT_WORK(&cp->reset_task, cas_reset_task, cp);
4986
4987 /* Default link parameters */
4988 if (link_mode >= 0 && link_mode <= 6)
4989 cp->link_cntl = link_modes[link_mode];
4990 else
4991 cp->link_cntl = BMCR_ANENABLE;
4992 cp->lstate = link_down;
4993 cp->link_transition = LINK_TRANSITION_LINK_DOWN;
4994 netif_carrier_off(cp->dev);
4995 cp->timer_ticks = 0;
4996
4997 /* give us access to cassini registers */
4998 cp->regs = ioremap(casreg_base, casreg_len);
4999 if (cp->regs == 0UL) {
5000 printk(KERN_ERR PFX "Cannot map device registers, "
5001 "aborting.\n");
5002 goto err_out_free_res;
5003 }
5004 cp->casreg_len = casreg_len;
5005
5006 pci_save_state(pdev);
5007 cas_check_pci_invariants(cp);
5008 cas_hard_reset(cp);
5009 cas_reset(cp, 0);
5010 if (cas_check_invariants(cp))
5011 goto err_out_iounmap;
5012
5013 cp->init_block = (struct cas_init_block *)
5014 pci_alloc_consistent(pdev, sizeof(struct cas_init_block),
5015 &cp->block_dvma);
5016 if (!cp->init_block) {
5017 printk(KERN_ERR PFX "Cannot allocate init block, "
5018 "aborting.\n");
5019 goto err_out_iounmap;
5020 }
5021
5022 for (i = 0; i < N_TX_RINGS; i++)
5023 cp->init_txds[i] = cp->init_block->txds[i];
5024
5025 for (i = 0; i < N_RX_DESC_RINGS; i++)
5026 cp->init_rxds[i] = cp->init_block->rxds[i];
5027
5028 for (i = 0; i < N_RX_COMP_RINGS; i++)
5029 cp->init_rxcs[i] = cp->init_block->rxcs[i];
5030
5031 for (i = 0; i < N_RX_FLOWS; i++)
5032 skb_queue_head_init(&cp->rx_flows[i]);
5033
5034 dev->open = cas_open;
5035 dev->stop = cas_close;
5036 dev->hard_start_xmit = cas_start_xmit;
5037 dev->get_stats = cas_get_stats;
5038 dev->set_multicast_list = cas_set_multicast;
5039 dev->do_ioctl = cas_ioctl;
5040 dev->ethtool_ops = &cas_ethtool_ops;
5041 dev->tx_timeout = cas_tx_timeout;
5042 dev->watchdog_timeo = CAS_TX_TIMEOUT;
5043 dev->change_mtu = cas_change_mtu;
5044#ifdef USE_NAPI
5045 dev->poll = cas_poll;
5046 dev->weight = 64;
5047#endif
5048#ifdef CONFIG_NET_POLL_CONTROLLER
5049 dev->poll_controller = cas_netpoll;
5050#endif
5051 dev->irq = pdev->irq;
5052 dev->dma = 0;
5053
5054 /* Cassini features. */
5055 if ((cp->cas_flags & CAS_FLAG_NO_HW_CSUM) == 0)
5056 dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
5057
5058 if (pci_using_dac)
5059 dev->features |= NETIF_F_HIGHDMA;
5060
5061 if (register_netdev(dev)) {
5062 printk(KERN_ERR PFX "Cannot register net device, "
5063 "aborting.\n");
5064 goto err_out_free_consistent;
5065 }
5066
5067 i = readl(cp->regs + REG_BIM_CFG);
5068 printk(KERN_INFO "%s: Sun Cassini%s (%sbit/%sMHz PCI/%s) "
5069 "Ethernet[%d] ", dev->name,
5070 (cp->cas_flags & CAS_FLAG_REG_PLUS) ? "+" : "",
5071 (i & BIM_CFG_32BIT) ? "32" : "64",
5072 (i & BIM_CFG_66MHZ) ? "66" : "33",
5073 (cp->phy_type == CAS_PHY_SERDES) ? "Fi" : "Cu", pdev->irq);
5074
5075 for (i = 0; i < 6; i++)
5076 printk("%2.2x%c", dev->dev_addr[i],
5077 i == 5 ? ' ' : ':');
5078 printk("\n");
5079
5080 pci_set_drvdata(pdev, dev);
5081 cp->hw_running = 1;
5082 cas_entropy_reset(cp);
5083 cas_phy_init(cp);
5084 cas_begin_auto_negotiation(cp, NULL);
5085 return 0;
5086
5087err_out_free_consistent:
5088 pci_free_consistent(pdev, sizeof(struct cas_init_block),
5089 cp->init_block, cp->block_dvma);
5090
5091err_out_iounmap:
5092 down(&cp->pm_sem);
5093 if (cp->hw_running)
5094 cas_shutdown(cp);
5095 up(&cp->pm_sem);
5096
5097 iounmap(cp->regs);
5098
5099
5100err_out_free_res:
5101 pci_release_regions(pdev);
5102
5103err_write_cacheline:
5104 /* Try to restore it in case the error occurred after we
5105 * set it.
5106 */
5107 pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, orig_cacheline_size);
5108
5109err_out_free_netdev:
5110 free_netdev(dev);
5111
5112err_out_disable_pdev:
5113 pci_disable_device(pdev);
5114 pci_set_drvdata(pdev, NULL);
5115 return -ENODEV;
5116}
5117
5118static void __devexit cas_remove_one(struct pci_dev *pdev)
5119{
5120 struct net_device *dev = pci_get_drvdata(pdev);
5121 struct cas *cp;
5122 if (!dev)
5123 return;
5124
5125 cp = netdev_priv(dev);
5126 unregister_netdev(dev);
5127
5128 down(&cp->pm_sem);
5129 flush_scheduled_work();
5130 if (cp->hw_running)
5131 cas_shutdown(cp);
5132 up(&cp->pm_sem);
5133
5134#if 1
5135 if (cp->orig_cacheline_size) {
5136 /* Restore the cache line size if we had modified
5137 * it.
5138 */
5139 pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
5140 cp->orig_cacheline_size);
5141 }
5142#endif
5143 pci_free_consistent(pdev, sizeof(struct cas_init_block),
5144 cp->init_block, cp->block_dvma);
5145 iounmap(cp->regs);
5146 free_netdev(dev);
5147 pci_release_regions(pdev);
5148 pci_disable_device(pdev);
5149 pci_set_drvdata(pdev, NULL);
5150}
5151
5152#ifdef CONFIG_PM
5153static int cas_suspend(struct pci_dev *pdev, pm_message_t state)
5154{
5155 struct net_device *dev = pci_get_drvdata(pdev);
5156 struct cas *cp = netdev_priv(dev);
5157 unsigned long flags;
5158
5159 /* We hold the PM semaphore during the entire driver
5160 * sleep time
5161 */
5162 down(&cp->pm_sem);
5163
5164 /* If the driver is opened, we stop the DMA */
5165 if (cp->opened) {
5166 netif_device_detach(dev);
5167
5168 cas_lock_all_save(cp, flags);
5169
5170 /* We can set the second arg of cas_reset to 0
5171 * because on resume, we'll call cas_init_hw with
5172 * its second arg set so that autonegotiation is
5173 * restarted.
5174 */
5175 cas_reset(cp, 0);
5176 cas_clean_rings(cp);
5177 cas_unlock_all_restore(cp, flags);
5178 }
5179
5180 if (cp->hw_running)
5181 cas_shutdown(cp);
5182
5183 return 0;
5184}
5185
5186static int cas_resume(struct pci_dev *pdev)
5187{
5188 struct net_device *dev = pci_get_drvdata(pdev);
5189 struct cas *cp = netdev_priv(dev);
5190
5191 printk(KERN_INFO "%s: resuming\n", dev->name);
5192
5193 cas_hard_reset(cp);
5194 if (cp->opened) {
5195 unsigned long flags;
5196 cas_lock_all_save(cp, flags);
5197 cas_reset(cp, 0);
5198 cp->hw_running = 1;
5199 cas_clean_rings(cp);
5200 cas_init_hw(cp, 1);
5201 cas_unlock_all_restore(cp, flags);
5202
5203 netif_device_attach(dev);
5204 }
5205 up(&cp->pm_sem);
5206 return 0;
5207}
5208#endif /* CONFIG_PM */
5209
5210static struct pci_driver cas_driver = {
5211 .name = DRV_MODULE_NAME,
5212 .id_table = cas_pci_tbl,
5213 .probe = cas_init_one,
5214 .remove = __devexit_p(cas_remove_one),
5215#ifdef CONFIG_PM
5216 .suspend = cas_suspend,
5217 .resume = cas_resume
5218#endif
5219};
5220
5221static int __init cas_init(void)
5222{
5223 if (linkdown_timeout > 0)
5224 link_transition_timeout = linkdown_timeout * HZ;
5225 else
5226 link_transition_timeout = 0;
5227
5228 return pci_module_init(&cas_driver);
5229}
5230
5231static void __exit cas_cleanup(void)
5232{
5233 pci_unregister_driver(&cas_driver);
5234}
5235
5236module_init(cas_init);
5237module_exit(cas_cleanup);
diff --git a/drivers/net/cassini.h b/drivers/net/cassini.h
new file mode 100644
index 000000000000..88063ef16cf6
--- /dev/null
+++ b/drivers/net/cassini.h
@@ -0,0 +1,4425 @@
1/* $Id: cassini.h,v 1.16 2004/08/17 21:15:16 zaumen Exp $
2 * cassini.h: Definitions for Sun Microsystems Cassini(+) ethernet driver.
3 *
4 * Copyright (C) 2004 Sun Microsystems Inc.
5 * Copyright (c) 2003 Adrian Sun (asun@darksunrising.com)
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License as
9 * published by the Free Software Foundation; either version 2 of the
10 * License, or (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
20 * 02111-1307, USA.
21 *
22 * vendor id: 0x108E (Sun Microsystems, Inc.)
23 * device id: 0xabba (Cassini)
24 * revision ids: 0x01 = Cassini
25 * 0x02 = Cassini rev 2
26 * 0x10 = Cassini+
27 * 0x11 = Cassini+ 0.2u
28 *
29 * vendor id: 0x100b (National Semiconductor)
30 * device id: 0x0035 (DP83065/Saturn)
31 * revision ids: 0x30 = Saturn B2
32 *
33 * rings are all offset from 0.
34 *
35 * there are two clock domains:
36 * PCI: 33/66MHz clock
37 * chip: 125MHz clock
38 */
39
40#ifndef _CASSINI_H
41#define _CASSINI_H
42
43/* cassini register map: 2M memory mapped in 32-bit memory space accessible as
44 * 32-bit words. there is no i/o port access. REG_ addresses are
45 * shared between cassini and cassini+. REG_PLUS_ addresses only
46 * appear in cassini+. REG_MINUS_ addresses only appear in cassini.
47 */
48#define CAS_ID_REV2 0x02
49#define CAS_ID_REVPLUS 0x10
50#define CAS_ID_REVPLUS02u 0x11
51#define CAS_ID_REVSATURNB2 0x30
52
53/** global resources **/
54
55/* this register sets the weights for the weighted round robin arbiter. e.g.,
56 * if rx weight == 1 and tx weight == 0, rx == 2x tx transfer credit
57 * for its next turn to access the pci bus.
58 * map: 0x0 = x1, 0x1 = x2, 0x2 = x4, 0x3 = x8
59 * DEFAULT: 0x0, SIZE: 5 bits
60 */
61#define REG_CAWR 0x0004 /* core arbitration weight */
62#define CAWR_RX_DMA_WEIGHT_SHIFT 0
63#define CAWR_RX_DMA_WEIGHT_MASK 0x03 /* [0:1] */
64#define CAWR_TX_DMA_WEIGHT_SHIFT 2
65#define CAWR_TX_DMA_WEIGHT_MASK 0x0C /* [3:2] */
66#define CAWR_RR_DIS 0x10 /* [4] */
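/* Illustrative sketch, not part of the original driver: composing a CAWR
 * value that gives RX DMA twice the TX transfer credit, per the weight map
 * above (0x1 = x2 for rx, 0x0 = x1 for tx). "regs" stands for the
 * ioremapped register base the driver keeps in cp->regs; u32/readl/writel
 * come from the driver's usual includes.
 */
static inline void cawr_example_weight_rx_2x(void __iomem *regs)
{
        u32 val = readl(regs + REG_CAWR);

        val &= ~(CAWR_RX_DMA_WEIGHT_MASK | CAWR_TX_DMA_WEIGHT_MASK);
        val |= (0x1 << CAWR_RX_DMA_WEIGHT_SHIFT);   /* rx weight = x2 */
        val |= (0x0 << CAWR_TX_DMA_WEIGHT_SHIFT);   /* tx weight = x1 */
        writel(val, regs + REG_CAWR);
}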
67
68/* if enabled, BIM can send bursts across PCI bus > cacheline size. burst
69 * sizes determined by length of packet or descriptor transfer and the
70 * max length allowed by the target.
71 * DEFAULT: 0x0, SIZE: 1 bit
72 */
73#define REG_INF_BURST 0x0008 /* infinite burst enable reg */
74#define INF_BURST_EN 0x1 /* enable */
75
76/* top level interrupts [0-9] are auto-cleared to 0 when the status
77 * register is read. second level interrupts [13 - 18] are cleared at
78 * the source. tx completion register 3 is replicated in [19 - 31]
79 * DEFAULT: 0x00000000, SIZE: 29 bits
80 */
81#define REG_INTR_STATUS 0x000C /* interrupt status register */
82#define INTR_TX_INTME 0x00000001 /* frame w/ INT ME desc bit set
83 xferred from host queue to
84 TX FIFO */
85#define INTR_TX_ALL 0x00000002 /* all xmit frames xferred into
86 TX FIFO. i.e.,
87 TX Kick == TX complete. if
88 PACED_MODE set, then TX FIFO
89 also empty */
90#define INTR_TX_DONE 0x00000004 /* any frame xferred into tx
91 FIFO */
92#define INTR_TX_TAG_ERROR 0x00000008 /* TX FIFO tag framing
93 corrupted. FATAL ERROR */
94#define INTR_RX_DONE 0x00000010 /* at least 1 frame xferred
95 from RX FIFO to host mem.
96 RX completion reg updated.
97 may be delayed by recv
98 intr blanking. */
99#define INTR_RX_BUF_UNAVAIL 0x00000020 /* no more receive buffers.
100 RX Kick == RX complete */
101#define INTR_RX_TAG_ERROR 0x00000040 /* RX FIFO tag framing
102 corrupted. FATAL ERROR */
103#define INTR_RX_COMP_FULL 0x00000080 /* no more room in completion
104 ring to post descriptors.
105 RX complete head incr to
106 almost reach RX complete
107 tail */
108#define INTR_RX_BUF_AE 0x00000100 /* less than the
109 programmable threshold #
110 of free descr avail for
111 hw use */
112#define INTR_RX_COMP_AF 0x00000200 /* less than the
113 programmable threshold #
114 of descr spaces for hw
115 use in completion descr
116 ring */
117#define INTR_RX_LEN_MISMATCH 0x00000400 /* len field from MAC !=
118 len of non-reassembly pkt
119 from fifo during DMA or
120 header parser provides TCP
121 header and payload size >
122 MAC packet size.
123 FATAL ERROR */
124#define INTR_SUMMARY 0x00001000 /* summary interrupt bit. this
125 bit will be set if an interrupt is
126 generated on the pci bus. useful
127 when driver is polling for
128 interrupts */
129#define INTR_PCS_STATUS 0x00002000 /* PCS interrupt status register */
130#define INTR_TX_MAC_STATUS 0x00004000 /* TX MAC status register has at
131 least 1 unmasked interrupt set */
132#define INTR_RX_MAC_STATUS 0x00008000 /* RX MAC status register has at
133 least 1 unmasked interrupt set */
134#define INTR_MAC_CTRL_STATUS 0x00010000 /* MAC control status register has
135 at least 1 unmasked interrupt
136 set */
137#define INTR_MIF_STATUS 0x00020000 /* MIF status register has at least
138 1 unmasked interrupt set */
139#define INTR_PCI_ERROR_STATUS 0x00040000 /* PCI error status register in the
140 BIF has at least 1 unmasked
141 interrupt set */
142#define INTR_TX_COMP_3_MASK 0xFFF80000 /* mask for TX completion
143 3 reg data */
144#define INTR_TX_COMP_3_SHIFT 19
145#define INTR_ERROR_MASK (INTR_MIF_STATUS | INTR_PCI_ERROR_STATUS | \
146 INTR_PCS_STATUS | INTR_RX_LEN_MISMATCH | \
147 INTR_TX_MAC_STATUS | INTR_RX_MAC_STATUS | \
148 INTR_TX_TAG_ERROR | INTR_RX_TAG_ERROR | \
149 INTR_MAC_CTRL_STATUS)
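/* Illustrative sketch, not part of the original driver: the sort of check an
 * interrupt handler might make on REG_INTR_STATUS using the bits above.
 * Since the top-level bits auto-clear on read, the status is read once and
 * the saved value examined. Names follow the conventions of the other
 * sketches in this file ("regs" = mapped register base).
 */
static inline int intr_example_saw_error(void __iomem *regs, u32 *status)
{
        *status = readl(regs + REG_INTR_STATUS);
        return (*status & INTR_ERROR_MASK) != 0;
}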
150
151/* determines which status events will cause an interrupt. layout same
152 * as REG_INTR_STATUS.
153 * DEFAULT: 0xFFFFFFFF, SIZE: 16 bits
154 */
155#define REG_INTR_MASK 0x0010 /* Interrupt mask */
156
157/* top level interrupt bits that are cleared during read of REG_INTR_STATUS_ALIAS.
158 * useful when driver is polling for interrupts. layout same as REG_INTR_MASK.
159 * DEFAULT: 0x00000000, SIZE: 12 bits
160 */
161#define REG_ALIAS_CLEAR 0x0014 /* alias clear mask
162 (used w/ status alias) */
163/* same as REG_INTR_STATUS except that only bits cleared are those selected by
164 * REG_ALIAS_CLEAR
165 * DEFAULT: 0x00000000, SIZE: 29 bits
166 */
167#define REG_INTR_STATUS_ALIAS 0x001C /* interrupt status alias
168 (selective clear) */
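/* Illustrative sketch, not part of the original driver: when polling rather
 * than taking interrupts, REG_ALIAS_CLEAR selects which top-level bits a
 * read of REG_INTR_STATUS_ALIAS clears, leaving the other bits latched for
 * a later read of REG_INTR_STATUS.
 */
static inline u32 intr_example_poll_rx_done(void __iomem *regs)
{
        writel(INTR_RX_DONE, regs + REG_ALIAS_CLEAR);   /* clear only RX_DONE */
        return readl(regs + REG_INTR_STATUS_ALIAS) & INTR_RX_DONE;
}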
169
170/* DEFAULT: 0x0, SIZE: 3 bits */
171#define REG_PCI_ERR_STATUS 0x1000 /* PCI error status */
172#define PCI_ERR_BADACK 0x01 /* reserved in Cassini+.
173 set if no ACK64# during ABS64 cycle
174 in Cassini. */
175#define PCI_ERR_DTRTO 0x02 /* delayed xaction timeout. set if
176 no read retry after 2^15 clocks */
177#define PCI_ERR_OTHER 0x04 /* other PCI errors */
178#define PCI_ERR_BIM_DMA_WRITE 0x08 /* BIM received 0 count DMA write req.
179 unused in Cassini. */
180#define PCI_ERR_BIM_DMA_READ 0x10 /* BIM received 0 count DMA read req.
181 unused in Cassini. */
182#define PCI_ERR_BIM_DMA_TIMEOUT 0x20 /* BIM received 255 retries during
183 DMA. unused in cassini. */
184
185/* mask for PCI status events that will set PCI_ERR_STATUS. if cleared, event
186 * causes an interrupt to be generated.
187 * DEFAULT: 0x7, SIZE: 3 bits
188 */
189#define REG_PCI_ERR_STATUS_MASK 0x1004 /* PCI Error status mask */
190
191/* used to configure PCI related parameters that are not in PCI config space.
192 * DEFAULT: 0bxx000, SIZE: 5 bits
193 */
194#define REG_BIM_CFG 0x1008 /* BIM Configuration */
195#define BIM_CFG_RESERVED0 0x001 /* reserved */
196#define BIM_CFG_RESERVED1 0x002 /* reserved */
197#define BIM_CFG_64BIT_DISABLE 0x004 /* disable 64-bit mode */
198#define BIM_CFG_66MHZ 0x008 /* (ro) 1 = 66MHz, 0 = < 66MHz */
199#define BIM_CFG_32BIT 0x010 /* (ro) 1 = 32-bit slot, 0 = 64-bit */
200#define BIM_CFG_DPAR_INTR_ENABLE 0x020 /* detected parity err enable */
201#define BIM_CFG_RMA_INTR_ENABLE 0x040 /* master abort intr enable */
202#define BIM_CFG_RTA_INTR_ENABLE 0x080 /* target abort intr enable */
203#define BIM_CFG_RESERVED2 0x100 /* reserved */
204#define BIM_CFG_BIM_DISABLE 0x200 /* stop BIM DMA. use before global
205 reset. reserved in Cassini. */
206#define BIM_CFG_BIM_STATUS 0x400 /* (ro) 1 = BIM DMA suspended.
207 reserved in Cassini. */
208#define BIM_CFG_PERROR_BLOCK 0x800 /* block PERR# to pci bus. def: 0.
209 reserved in Cassini. */
210
211/* DEFAULT: 0x00000000, SIZE: 32 bits */
212#define REG_BIM_DIAG 0x100C /* BIM Diagnostic */
213#define BIM_DIAG_MSTR_SM_MASK 0x3FFFFF00 /* PCI master controller state
214 machine bits [21:0] */
215#define BIM_DIAG_BRST_SM_MASK 0x7F /* PCI burst controller state
216 machine bits [6:0] */
217
218/* writing to SW_RESET_TX and SW_RESET_RX will issue a global
219 * reset. poll until TX and RX read back as 0's for completion.
220 */
221#define REG_SW_RESET 0x1010 /* Software reset */
222#define SW_RESET_TX 0x00000001 /* reset TX DMA engine. poll until
223 cleared to 0. */
224#define SW_RESET_RX 0x00000002 /* reset RX DMA engine. poll until
225 cleared to 0. */
226#define SW_RESET_RSTOUT 0x00000004 /* force RSTOUT# pin active (low).
227 resets PHY and anything else
228 connected to RSTOUT#. RSTOUT#
229 is also activated by local PCI
230 reset when hot-swap is being
231 done. */
232#define SW_RESET_BLOCK_PCS_SLINK 0x00000008 /* if a global reset is done with
233 this bit set, PCS and SLINK
234 modules won't be reset.
235 i.e., link won't drop. */
236#define SW_RESET_BREQ_SM_MASK 0x00007F00 /* breq state machine [6:0] */
237#define SW_RESET_PCIARB_SM_MASK 0x00070000 /* pci arbitration state bits:
238 0b000: ARB_IDLE1
239 0b001: ARB_IDLE2
240 0b010: ARB_WB_ACK
241 0b011: ARB_WB_WAT
242 0b100: ARB_RB_ACK
243 0b101: ARB_RB_WAT
244 0b110: ARB_RB_END
245 0b111: ARB_WB_END */
246#define SW_RESET_RDPCI_SM_MASK 0x00300000 /* read pci state bits:
247 0b00: RD_PCI_WAT
248 0b01: RD_PCI_RDY
249 0b11: RD_PCI_ACK */
250#define SW_RESET_RDARB_SM_MASK 0x00C00000 /* read arbitration state bits:
251 0b00: AD_IDL_RX
252 0b01: AD_ACK_RX
253 0b10: AD_ACK_TX
254 0b11: AD_IDL_TX */
255#define SW_RESET_WRPCI_SM_MASK 0x06000000 /* write pci state bits
256 0b00: WR_PCI_WAT
257 0b01: WR_PCI_RDY
258 0b11: WR_PCI_ACK */
259#define SW_RESET_WRARB_SM_MASK 0x38000000 /* write arbitration state bits:
260 0b000: ARB_IDLE1
261 0b001: ARB_IDLE2
262 0b010: ARB_TX_ACK
263 0b011: ARB_TX_WAT
264 0b100: ARB_RX_ACK
265 0b110: ARB_RX_WAT */
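/* Illustrative sketch, not part of the original driver (its global reset
 * path does something along these lines): issue a global reset by setting
 * SW_RESET_TX and SW_RESET_RX together, then poll until both bits read back
 * as zero, as described above. The retry count here is arbitrary.
 */
static inline int sw_reset_example_global(void __iomem *regs)
{
        int limit = 10000;

        writel(SW_RESET_TX | SW_RESET_RX, regs + REG_SW_RESET);
        while (--limit > 0) {
                if ((readl(regs + REG_SW_RESET) &
                     (SW_RESET_TX | SW_RESET_RX)) == 0)
                        return 0;
                udelay(10);
        }
        return -1;      /* reset did not complete */
}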
266
267/* Cassini only. 64-bit register used to check PCI datapath. when read,
268 * value written has both lower and upper 32-bit halves rotated to the right
269 * one bit position. e.g., FFFFFFFF FFFFFFFF -> 7FFFFFFF 7FFFFFFF
270 */
271#define REG_MINUS_BIM_DATAPATH_TEST 0x1018 /* Cassini: BIM datapath test
272 Cassini+: reserved */
273
274/* output enables are provided for each device's chip select and for the rest
275 * of the outputs from cassini to its local bus devices. two sw programmable
276 * bits are connected to general purpose control/status bits.
277 * DEFAULT: 0x7
278 */
279#define REG_BIM_LOCAL_DEV_EN 0x1020 /* BIM local device
280 output EN. default: 0x7 */
281#define BIM_LOCAL_DEV_PAD 0x01 /* address bus, RW signal, and
282 OE signal output enable on the
283 local bus interface. these
284 are shared between both local
285 bus devices. tristate when 0. */
286#define BIM_LOCAL_DEV_PROM 0x02 /* PROM chip select */
287#define BIM_LOCAL_DEV_EXT 0x04 /* secondary local bus device chip
288 select output enable */
289#define BIM_LOCAL_DEV_SOFT_0 0x08 /* sw programmable ctrl bit 0 */
290#define BIM_LOCAL_DEV_SOFT_1 0x10 /* sw programmable ctrl bit 1 */
291#define BIM_LOCAL_DEV_HW_RESET 0x20 /* internal hw reset. Cassini+ only. */
292
293/* access 24 entry BIM read and write buffers. put address in REG_BIM_BUFFER_ADDR
294 * and read/write from/to it via REG_BIM_BUFFER_DATA_LOW and _DATA_HI.
295 * _DATA_HI should be the last access of the sequence.
296 * DEFAULT: undefined
297 */
298#define REG_BIM_BUFFER_ADDR 0x1024 /* BIM buffer address. for
299 purposes. */
300#define BIM_BUFFER_ADDR_MASK 0x3F /* index (0 - 23) of buffer */
301#define BIM_BUFFER_WR_SELECT 0x40 /* write buffer access = 1
302 read buffer access = 0 */
303/* DEFAULT: undefined */
304#define REG_BIM_BUFFER_DATA_LOW 0x1028 /* BIM buffer data low */
305#define REG_BIM_BUFFER_DATA_HI 0x102C /* BIM buffer data high */
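/* Illustrative sketch, not part of the original driver: reading one entry of
 * the BIM read buffer per the sequence described above -- select the entry
 * (BIM_BUFFER_WR_SELECT clear for read access), read DATA_LOW, then read
 * DATA_HI as the last access of the sequence.
 */
static inline u64 bim_example_read_buffer_entry(void __iomem *regs, int index)
{
        u64 val;

        writel(index & BIM_BUFFER_ADDR_MASK, regs + REG_BIM_BUFFER_ADDR);
        val  = readl(regs + REG_BIM_BUFFER_DATA_LOW);
        val |= (u64) readl(regs + REG_BIM_BUFFER_DATA_HI) << 32;
        return val;
}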
306
307/* set BIM_RAM_BIST_START to start built-in self test for BIM read buffer.
308 * bit auto-clears when done with status read from _SUMMARY and _PASS bits.
309 */
310#define REG_BIM_RAM_BIST 0x102C /* BIM RAM (read buffer) BIST
311 control/status */
312#define BIM_RAM_BIST_RD_START 0x01 /* start BIST for BIM read buffer */
313#define BIM_RAM_BIST_WR_START 0x02 /* start BIST for BIM write buffer.
314 Cassini only. reserved in
315 Cassini+. */
316#define BIM_RAM_BIST_RD_PASS 0x04 /* summary BIST pass status for read
317 buffer. */
318#define BIM_RAM_BIST_WR_PASS 0x08 /* summary BIST pass status for write
319 buffer. Cassini only. reserved
320 in Cassini+. */
321#define BIM_RAM_BIST_RD_LOW_PASS 0x10 /* read low bank passes BIST */
322#define BIM_RAM_BIST_RD_HI_PASS 0x20 /* read high bank passes BIST */
323#define BIM_RAM_BIST_WR_LOW_PASS 0x40 /* write low bank passes BIST.
324 Cassini only. reserved in
325 Cassini+. */
326#define BIM_RAM_BIST_WR_HI_PASS 0x80 /* write high bank passes BIST.
327 Cassini only. reserved in
328 Cassini+. */
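/* Illustrative sketch, not part of the original driver: starting the read
 * buffer BIST and waiting for completion, following the description above
 * (the start bit auto-clears when the test finishes; the poll bound is
 * arbitrary).
 */
static inline int bim_example_run_rd_bist(void __iomem *regs)
{
        int limit = 1000;

        writel(BIM_RAM_BIST_RD_START, regs + REG_BIM_RAM_BIST);
        while ((readl(regs + REG_BIM_RAM_BIST) & BIM_RAM_BIST_RD_START) &&
               --limit > 0)
                udelay(10);
        return (readl(regs + REG_BIM_RAM_BIST) & BIM_RAM_BIST_RD_PASS) ? 0 : -1;
}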
329
330/* ASUN: i'm not sure what this does as it's not in the spec.
331 * DEFAULT: 0xFC
332 */
333#define REG_BIM_DIAG_MUX 0x1030 /* BIM diagnostic probe mux
334 select register */
335
336/* enable probe monitoring mode and select data appearing on the P_A* bus. bit
337 * values for _SEL_HI_MASK and _SEL_LOW_MASK:
338 * 0x0: internal probe[7:0] (pci arb state, wtc empty w, wtc full w, wtc empty w,
339 * wtc empty r, post pci)
340 * 0x1: internal probe[15:8] (pci wbuf comp, pci wpkt comp, pci rbuf comp,
341 * pci rpkt comp, txdma wr req, txdma wr ack,
342 * txdma wr rdy, txdma wr xfr done)
343 * 0x2: internal probe[23:16] (txdma rd req, txdma rd ack, txdma rd rdy, rxdma rd,
344 * rd arb state, rd pci state)
345 * 0x3: internal probe[31:24] (rxdma req, rxdma ack, rxdma rdy, wrarb state,
346 * wrpci state)
347 * 0x4: pci io probe[7:0] 0x5: pci io probe[15:8]
348 * 0x6: pci io probe[23:16] 0x7: pci io probe[31:24]
349 * 0x8: pci io probe[39:32] 0x9: pci io probe[47:40]
350 * 0xa: pci io probe[55:48] 0xb: pci io probe[63:56]
351 * the following are not available in Cassini:
352 * 0xc: rx probe[7:0] 0xd: tx probe[7:0]
353 * 0xe: hp probe[7:0] 0xf: mac probe[7:0]
354 */
355#define REG_PLUS_PROBE_MUX_SELECT 0x1034 /* Cassini+: PROBE MUX SELECT */
356#define PROBE_MUX_EN 0x80000000 /* allow probe signals to be
357 driven on local bus P_A[15:0]
358 for debugging */
359#define PROBE_MUX_SUB_MUX_MASK 0x0000FF00 /* select sub module probe signals:
360 0x03 = mac[1:0]
361 0x0C = rx[1:0]
362 0x30 = tx[1:0]
363 0xC0 = hp[1:0] */
364#define PROBE_MUX_SEL_HI_MASK 0x000000F0 /* select which module to appear
365 on P_A[15:8]. see above for
366 values. */
367#define PROBE_MUX_SEL_LOW_MASK 0x0000000F /* select which module to appear
368 on P_A[7:0]. see above for
369 values. */
370
371/* values mean the same thing as REG_INTR_MASK except that it's for INTB.
372 DEFAULT: 0x1F */
373#define REG_PLUS_INTR_MASK_1 0x1038 /* Cassini+: interrupt mask
374 register 2 for INTB */
375#define REG_PLUS_INTRN_MASK(x) (REG_PLUS_INTR_MASK_1 + ((x) - 1)*16)
376/* bits correspond to both _MASK and _STATUS registers. _ALT corresponds to
377 * all of the alternate (2-4) INTR registers while _1 corresponds to only
378 * _MASK_1 and _STATUS_1 registers.
379 * DEFAULT: 0x7 for MASK registers, 0x0 for ALIAS_CLEAR registers
380 */
381#define INTR_RX_DONE_ALT 0x01
382#define INTR_RX_COMP_FULL_ALT 0x02
383#define INTR_RX_COMP_AF_ALT 0x04
384#define INTR_RX_BUF_UNAVAIL_1 0x08
385#define INTR_RX_BUF_AE_1 0x10 /* almost empty */
386#define INTRN_MASK_RX_EN 0x80
387#define INTRN_MASK_CLEAR_ALL (INTR_RX_DONE_ALT | \
388 INTR_RX_COMP_FULL_ALT | \
389 INTR_RX_COMP_AF_ALT | \
390 INTR_RX_BUF_UNAVAIL_1 | \
391 INTR_RX_BUF_AE_1)
392#define REG_PLUS_INTR_STATUS_1 0x103C /* Cassini+: interrupt status
393 register 2 for INTB. default: 0x1F */
394#define REG_PLUS_INTRN_STATUS(x) (REG_PLUS_INTR_STATUS_1 + ((x) - 1)*16)
395#define INTR_STATUS_ALT_INTX_EN 0x80 /* generate INTX when one of the
396 flags are set. enables desc ring. */
397
398#define REG_PLUS_ALIAS_CLEAR_1 0x1040 /* Cassini+: alias clear mask
399 register 2 for INTB */
400#define REG_PLUS_ALIASN_CLEAR(x) (REG_PLUS_ALIAS_CLEAR_1 + ((x) - 1)*16)
401
402#define REG_PLUS_INTR_STATUS_ALIAS_1 0x1044 /* Cassini+: interrupt status
403 register alias 2 for INTB */
404#define REG_PLUS_INTRN_STATUS_ALIAS(x) (REG_PLUS_INTR_STATUS_ALIAS_1 + ((x) - 1)*16)
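/* Illustrative sketch, not part of the original driver: the _INTRN_ macros
 * above address the per-vector (INTB and later) register banks on Cassini+,
 * which are spaced 16 bytes apart, so vector n (n >= 1) is reached like
 * this.
 */
static inline u32 intrn_example_read_status(void __iomem *regs, int n)
{
        return readl(regs + REG_PLUS_INTRN_STATUS(n));
}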
405
406#define REG_SATURN_PCFG 0x106c /* pin configuration register for
407 integrated macphy */
408
409#define SATURN_PCFG_TLA 0x00000001 /* 1 = phy actled */
410#define SATURN_PCFG_FLA 0x00000002 /* 1 = phy link10led */
411#define SATURN_PCFG_CLA 0x00000004 /* 1 = phy link100led */
412#define SATURN_PCFG_LLA 0x00000008 /* 1 = phy link1000led */
413#define SATURN_PCFG_RLA 0x00000010 /* 1 = phy duplexled */
414#define SATURN_PCFG_PDS 0x00000020 /* phy debug mode.
415 0 = normal */
416#define SATURN_PCFG_MTP 0x00000080 /* test point select */
417#define SATURN_PCFG_GMO 0x00000100 /* GMII observe. 1 =
418 GMII on SERDES pins for
419 monitoring. */
420#define SATURN_PCFG_FSI 0x00000200 /* 1 = freeze serdes/gmii. all
421 pins configured as outputs.
422 for power saving when using
423 internal phy. */
424#define SATURN_PCFG_LAD 0x00000800 /* 0 = mac core led ctrl
425 polarity from strapping
426 value.
427 1 = mac core led ctrl
428 polarity active low. */
429
430
431/** transmit dma registers **/
432#define MAX_TX_RINGS_SHIFT 2
433#define MAX_TX_RINGS (1 << MAX_TX_RINGS_SHIFT)
434#define MAX_TX_RINGS_MASK (MAX_TX_RINGS - 1)
435
436/* TX configuration.
437 * descr ring size = 32 * (1 << n), n < 9. e.g., 0x8 = 8k. default: 0x8
438 * DEFAULT: 0x3F000001
439 */
440#define REG_TX_CFG 0x2004 /* TX config */
441#define TX_CFG_DMA_EN 0x00000001 /* enable TX DMA. if cleared, DMA
442 will stop after xfer of current
443 buffer has been completed. */
444#define TX_CFG_FIFO_PIO_SEL 0x00000002 /* TX DMA FIFO can be
445 accessed w/ FIFO addr
446 and data registers.
447 TX DMA should be
448 disabled. */
449#define TX_CFG_DESC_RING0_MASK 0x0000003C /* # desc entries in
450 ring 1. */
451#define TX_CFG_DESC_RING0_SHIFT 2
452#define TX_CFG_DESC_RINGN_MASK(a) (TX_CFG_DESC_RING0_MASK << (a)*4)
453#define TX_CFG_DESC_RINGN_SHIFT(a) (TX_CFG_DESC_RING0_SHIFT + (a)*4)
454#define TX_CFG_PACED_MODE 0x00100000 /* TX_ALL only set after
455 TX FIFO becomes empty.
456 if 0, TX_ALL set
457 if descr queue empty. */
458#define TX_CFG_DMA_RDPIPE_DIS 0x01000000 /* always set to 1 */
459#define TX_CFG_COMPWB_Q1 0x02000000 /* completion writeback happens at
460 the end of every packet kicked
461 through Q1. */
462#define TX_CFG_COMPWB_Q2 0x04000000 /* completion writeback happens at
463 the end of every packet kicked
464 through Q2. */
465#define TX_CFG_COMPWB_Q3 0x08000000 /* completion writeback happens at
466 the end of every packet kicked
467 through Q3 */
468#define TX_CFG_COMPWB_Q4 0x10000000 /* completion writeback happens at
469 the end of every packet kicked
470 through Q4 */
471#define TX_CFG_INTR_COMPWB_DIS 0x20000000 /* disable pre-interrupt completion
472 writeback */
473#define TX_CFG_CTX_SEL_MASK 0xC0000000 /* selects tx test port
474 connection
475 0b00: tx mac req,
476 tx mac retry req,
477 tx ack and tx tag.
478 0b01: txdma rd req,
479 txdma rd ack,
480 txdma rd rdy,
481 txdma rd type0
482 0b11: txdma wr req,
483 txdma wr ack,
484 txdma wr rdy,
485 txdma wr xfr done. */
486#define TX_CFG_CTX_SEL_SHIFT 30
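/* Hypothetical sketch (not part of the original header): encode the
 * per-ring descriptor count field of REG_TX_CFG. Ring sizes are
 * 32 * (1 << n) with n < 9, so the field value is log2(entries / 32).
 * "entries" is assumed to be a power of two >= 32 and the helper name is
 * made up for illustration.
 */
static inline u32 cas_tx_cfg_ring_size(int ring, unsigned int entries)
{
	u32 n = 0;

	while ((32U << n) < entries)	/* find n such that 32 * 2^n == entries */
		n++;
	return (n << TX_CFG_DESC_RINGN_SHIFT(ring)) &
		TX_CFG_DESC_RINGN_MASK(ring);
}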
487
488/* 11-bit counters that point to next location in FIFO to be loaded/retrieved.
489 * used for diagnostics only.
490 */
491#define REG_TX_FIFO_WRITE_PTR 0x2014 /* TX FIFO write pointer */
492#define REG_TX_FIFO_SHADOW_WRITE_PTR 0x2018 /* TX FIFO shadow write
493 pointer. temp hold reg.
494 diagnostics only. */
495#define REG_TX_FIFO_READ_PTR 0x201C /* TX FIFO read pointer */
496#define REG_TX_FIFO_SHADOW_READ_PTR 0x2020 /* TX FIFO shadow read
497 pointer */
498
499/* (ro) 11-bit up/down counter w/ # of frames currently in TX FIFO */
500#define REG_TX_FIFO_PKT_CNT 0x2024 /* TX FIFO packet counter */
501
502/* current state of all state machines in TX */
503#define REG_TX_SM_1 0x2028 /* TX state machine reg #1 */
504#define TX_SM_1_CHAIN_MASK 0x000003FF /* chaining state machine */
505#define TX_SM_1_CSUM_MASK 0x00000C00 /* checksum state machine */
506#define TX_SM_1_FIFO_LOAD_MASK 0x0003F000 /* FIFO load state machine.
507 = 0x01 when TX disabled. */
508#define TX_SM_1_FIFO_UNLOAD_MASK 0x003C0000 /* FIFO unload state machine */
509#define TX_SM_1_CACHE_MASK 0x03C00000 /* desc. prefetch cache controller
510 state machine */
511#define TX_SM_1_CBQ_ARB_MASK 0xF8000000 /* CBQ arbiter state machine */
512
513#define REG_TX_SM_2 0x202C /* TX state machine reg #2 */
514#define TX_SM_2_COMP_WB_MASK 0x07 /* completion writeback sm */
515#define TX_SM_2_SUB_LOAD_MASK 0x38 /* sub load state machine */
516#define TX_SM_2_KICK_MASK 0xC0 /* kick state machine */
517
518/* 64-bit pointer to the transmit data buffer. only the 50 LSB are incremented
519 * while the upper 23 bits are taken from the TX descriptor
520 */
521#define REG_TX_DATA_PTR_LOW 0x2030 /* TX data pointer low */
522#define REG_TX_DATA_PTR_HI 0x2034 /* TX data pointer high */
523
524/* 13 bit registers written by driver w/ descriptor value that follows
525 * last valid xmit descriptor. kick # and complete # values are used by
526 * the xmit dma engine to control tx descr fetching. if > 1 valid
527 * tx descr is available within the cache line being read, cassini will
528 * internally cache up to 4 of them. 0 on reset. _KICK = rw, _COMP = ro.
529 */
530#define REG_TX_KICK0 0x2038 /* TX kick reg #1 */
531#define REG_TX_KICKN(x) (REG_TX_KICK0 + (x)*4)
532#define REG_TX_COMP0 0x2048 /* TX completion reg #1 */
533#define REG_TX_COMPN(x) (REG_TX_COMP0 + (x)*4)
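/* Hypothetical usage sketch (not part of the original header): tell the TX
 * DMA engine that descriptors up to, but excluding, "entry" are valid on
 * ring "ring". Assumes <linux/io.h>; "regs" is an assumed ioremap()ed
 * register base and the helper name is made up for illustration.
 */
static inline void cas_tx_kick(void __iomem *regs, int ring, unsigned int entry)
{
	writel(entry, regs + REG_TX_KICKN(ring));

	/* hardware progress can later be compared against the kick value: */
	/* done = readl(regs + REG_TX_COMPN(ring)); */
}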
534
535/* values of TX_COMPLETE_1-4 are written. each completion register
536 * is 2 bytes in size and contiguous. 8B allocation w/ 8B alignment.
537 * NOTE: completion reg values are only written back prior to TX_INTME and
538 * TX_ALL interrupts. at all other times, the most up-to-date index values
539 * should be obtained from the REG_TX_COMPLETE_# registers.
540 * here's the layout:
541 * offset from base addr completion # byte
542 * 0 TX_COMPLETE_1_MSB
543 * 1 TX_COMPLETE_1_LSB
544 * 2 TX_COMPLETE_2_MSB
545 * 3 TX_COMPLETE_2_LSB
546 * 4 TX_COMPLETE_3_MSB
547 * 5 TX_COMPLETE_3_LSB
548 * 6 TX_COMPLETE_4_MSB
549 * 7 TX_COMPLETE_4_LSB
550 */
551#define TX_COMPWB_SIZE 8
552#define REG_TX_COMPWB_DB_LOW 0x2058 /* TX completion write back
553 base low */
554#define REG_TX_COMPWB_DB_HI 0x205C /* TX completion write back
555 base high */
556#define TX_COMPWB_MSB_MASK 0x00000000000000FFULL
557#define TX_COMPWB_MSB_SHIFT 0
558#define TX_COMPWB_LSB_MASK 0x000000000000FF00ULL
559#define TX_COMPWB_LSB_SHIFT 8
560#define TX_COMPWB_NEXT(x) ((x) >> 16)
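/* Hypothetical sketch (not part of the original header): unpack the 8-byte
 * completion writeback block laid out above into the four per-ring
 * completion indices. "compwb" is the 64-bit value DMA'ed by the chip
 * (already converted with le64_to_cpu()); the helper name and output array
 * are made up for illustration. Assumes <linux/types.h>.
 */
static inline void cas_compwb_unpack(u64 compwb, u16 comp[MAX_TX_RINGS])
{
	int ring;

	for (ring = 0; ring < MAX_TX_RINGS; ring++) {
		/* the MSB byte sits at the lower offset, the LSB byte follows */
		comp[ring] = ((compwb & TX_COMPWB_MSB_MASK) << 8) |
			     ((compwb & TX_COMPWB_LSB_MASK) >> 8);
		compwb = TX_COMPWB_NEXT(compwb);	/* advance to next 16-bit slot */
	}
}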
561
562/* 53 MSB used as base address. 11 LSB assumed to be 0. TX desc pointer must
563 * be 2KB-aligned. */
564#define REG_TX_DB0_LOW 0x2060 /* TX descriptor base low #1 */
565#define REG_TX_DB0_HI 0x2064 /* TX descriptor base hi #1 */
566#define REG_TX_DBN_LOW(x) (REG_TX_DB0_LOW + (x)*8)
567#define REG_TX_DBN_HI(x) (REG_TX_DB0_HI + (x)*8)
568
569/* 16-bit registers hold weights for the weighted round-robin of the
570 * four CBQ TX descr rings. weights correspond to # bytes xferred from
571 * host to TXFIFO in a round of WRR arbitration. can be set
572 * dynamically with new weights set upon completion of the current
573 * packet transfer from host memory to TXFIFO. a dummy write to any of
574 * these registers causes a queue1 pre-emption with all historical bw
575 * deficit data reset to 0 (useful when congestion requires a
576 * pre-emption/re-allocation of network bandwidth).
577 */
578#define REG_TX_MAXBURST_0 0x2080 /* TX MaxBurst #1 */
579#define REG_TX_MAXBURST_1 0x2084 /* TX MaxBurst #2 */
580#define REG_TX_MAXBURST_2 0x2088 /* TX MaxBurst #3 */
581#define REG_TX_MAXBURST_3 0x208C /* TX MaxBurst #4 */
582
583/* diagnostics access to any TX FIFO location. every access is 65
584 * bits. _DATA_LOW = 32 LSB, _DATA_HI_T1/T0 = 32 MSB. _TAG = tag bit.
585 * writing _DATA_HI_T0 sets tag bit low, writing _DATA_HI_T1 sets tag
586 * bit high. TX_FIFO_PIO_SEL must be set for TX FIFO PIO access. if
587 * TX FIFO data integrity is desired, TX DMA should be
588 * disabled. _DATA_HI_Tx should be the last access of the sequence.
589 */
590#define REG_TX_FIFO_ADDR 0x2104 /* TX FIFO address */
591#define REG_TX_FIFO_TAG 0x2108 /* TX FIFO tag */
592#define REG_TX_FIFO_DATA_LOW 0x210C /* TX FIFO data low */
593#define REG_TX_FIFO_DATA_HI_T1 0x2110 /* TX FIFO data high t1 */
594#define REG_TX_FIFO_DATA_HI_T0 0x2114 /* TX FIFO data high t0 */
595#define REG_TX_FIFO_SIZE 0x2118 /* (ro) TX FIFO size = 0x090 = 9KB */
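/* Hypothetical diagnostics sketch (not part of the original header): one
 * TX FIFO PIO write following the sequence described above --
 * TX_CFG_FIFO_PIO_SEL must already be set and TX DMA disabled, and the
 * _DATA_HI_Tx write must come last. "regs" is an assumed ioremap()ed
 * register base; the helper name is made up for illustration.
 */
static inline void cas_tx_fifo_diag_write(void __iomem *regs, u32 addr,
					  u32 data_lo, u32 data_hi, int tag)
{
	writel(addr, regs + REG_TX_FIFO_ADDR);
	writel(data_lo, regs + REG_TX_FIFO_DATA_LOW);
	/* last access of the sequence; selecting T1 sets the tag bit high */
	writel(data_hi, regs + (tag ? REG_TX_FIFO_DATA_HI_T1 :
				      REG_TX_FIFO_DATA_HI_T0));
}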
596
597/* 9-bit register controls BIST of TX FIFO. bit set indicates that the BIST
598 * passed for the specified memory
599 */
600#define REG_TX_RAMBIST 0x211C /* TX RAMBIST control/status */
601#define TX_RAMBIST_STATE 0x01C0 /* progress state of RAMBIST
602 controller state machine */
603#define TX_RAMBIST_RAM33A_PASS 0x0020 /* RAM33A passed */
604#define TX_RAMBIST_RAM32A_PASS 0x0010 /* RAM32A passed */
605#define TX_RAMBIST_RAM33B_PASS 0x0008 /* RAM33B passed */
606#define TX_RAMBIST_RAM32B_PASS 0x0004 /* RAM32B passed */
607#define TX_RAMBIST_SUMMARY 0x0002 /* all RAM passed */
608#define TX_RAMBIST_START 0x0001 /* write 1 to start BIST. self
609 clears on completion. */
610
611/** receive dma registers **/
612#define MAX_RX_DESC_RINGS 2
613#define MAX_RX_COMP_RINGS 4
614
615/* receive DMA channel configuration. default: 0x80910
616 * free ring size = (1 << n)*32 -> [32 - 8k]
617 * completion ring size = (1 << n)*128 -> [128 - 32k], n < 9
618 * DEFAULT: 0x80910
619 */
620#define REG_RX_CFG 0x4000 /* RX config */
621#define RX_CFG_DMA_EN 0x00000001 /* enable RX DMA. 0 stops
622 channel as soon as current
623 frame xfer has completed.
624 driver should disable MAC
625 for 200ms before disabling
626 RX */
627#define RX_CFG_DESC_RING_MASK 0x0000001E /* # desc entries in RX
628 free desc ring.
629 def: 0x8 = 8k */
630#define RX_CFG_DESC_RING_SHIFT 1
631#define RX_CFG_COMP_RING_MASK 0x000001E0 /* # desc entries in RX complete
632 ring. def: 0x8 = 32k */
633#define RX_CFG_COMP_RING_SHIFT 5
634#define RX_CFG_BATCH_DIS 0x00000200 /* disable receive desc
635 batching. def: 0x0 =
636 enabled */
637#define RX_CFG_SWIVEL_MASK 0x00001C00 /* byte offset of the 1st
638 data byte of the packet
639 w/in 8 byte boundaries.
640 this swivels the data
641 DMA'ed to header
642 buffers, jumbo buffers
643 when header split is not
644 requested and MTU sized
645 buffers. def: 0x2 */
646#define RX_CFG_SWIVEL_SHIFT 10
647
648/* cassini+ only */
649#define RX_CFG_DESC_RING1_MASK 0x000F0000 /* # of desc entries in
650 RX free desc ring 2.
651 def: 0x8 = 8k */
652#define RX_CFG_DESC_RING1_SHIFT 16
653
654
655/* the page size register allows cassini chips to do the following with
656 * received data:
657 * [--------------------------------------------------------------] page
658 * [off][buf1][pad][off][buf2][pad][off][buf3][pad][off][buf4][pad]
659 * |--------------| = PAGE_SIZE_BUFFER_STRIDE
660 * page = PAGE_SIZE
661 * offset = PAGE_SIZE_MTU_OFF
662 * for the above example, MTU_BUFFER_COUNT = 4.
663 * NOTE: as is apparent, you need to ensure that the following holds:
664 * MTU_BUFFER_COUNT <= PAGE_SIZE/PAGE_SIZE_BUFFER_STRIDE
665 * DEFAULT: 0x48002002 (8k pages)
666 */
667#define REG_RX_PAGE_SIZE 0x4004 /* RX page size */
668#define RX_PAGE_SIZE_MASK 0x00000003 /* size of pages pointed to
669 by receive descriptors.
670 if jumbo buffers are
671 supported the page size
672 should not be < 8k.
673 0b00 = 2k, 0b01 = 4k
674 0b10 = 8k, 0b11 = 16k
675 DEFAULT: 8k */
676#define RX_PAGE_SIZE_SHIFT 0
677#define RX_PAGE_SIZE_MTU_COUNT_MASK 0x00007800 /* # of MTU buffers the hw
678 packs into a page.
679 DEFAULT: 4 */
680#define RX_PAGE_SIZE_MTU_COUNT_SHIFT 11
681#define RX_PAGE_SIZE_MTU_STRIDE_MASK 0x18000000 /* # of bytes that separate
682 each MTU buffer +
683 offset from each
684 other.
685 0b00 = 1k, 0b01 = 2k
686 0b10 = 4k, 0b11 = 8k
687 DEFAULT: 0x1 */
688#define RX_PAGE_SIZE_MTU_STRIDE_SHIFT 27
689#define RX_PAGE_SIZE_MTU_OFF_MASK 0xC0000000 /* offset in each page that
690 hw writes the MTU buffer
691 into.
692 0b00 = 0,
693 0b01 = 64 bytes
694 0b10 = 96, 0b11 = 128
695 DEFAULT: 0x1 */
696#define RX_PAGE_SIZE_MTU_OFF_SHIFT 30
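/* Hypothetical sketch (not part of the original header): compose the
 * documented default REG_RX_PAGE_SIZE value (0x48002002) from its fields,
 * i.e. 8KB pages, 4 MTU buffers per page, a 2KB buffer stride and a
 * 64-byte offset, using the encodings listed above. The helper name is
 * made up for illustration; assumes <linux/types.h>.
 */
static inline u32 cas_rx_page_size_default(void)
{
	u32 val = 0;

	val |= 0x2 << RX_PAGE_SIZE_SHIFT;		/* 0b10 = 8KB pages */
	val |= 4 << RX_PAGE_SIZE_MTU_COUNT_SHIFT;	/* 4 MTU buffers per page */
	val |= 0x1 << RX_PAGE_SIZE_MTU_STRIDE_SHIFT;	/* 0b01 = 2KB stride */
	val |= 0x1 << RX_PAGE_SIZE_MTU_OFF_SHIFT;	/* 0b01 = 64-byte offset */

	return val;	/* == 0x48002002, the documented default */
}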
697
698/* 11-bit counter points to next location in RX FIFO to be loaded/read.
699 * shadow write pointers enable retries in case of early receive aborts.
700 * DEFAULT: 0x0. generated on 64-bit boundaries.
701 */
702#define REG_RX_FIFO_WRITE_PTR 0x4008 /* RX FIFO write pointer */
703#define REG_RX_FIFO_READ_PTR 0x400C /* RX FIFO read pointer */
704#define REG_RX_IPP_FIFO_SHADOW_WRITE_PTR 0x4010 /* RX IPP FIFO shadow write
705 pointer */
706#define REG_RX_IPP_FIFO_SHADOW_READ_PTR 0x4014 /* RX IPP FIFO shadow read
707 pointer */
708#define REG_RX_IPP_FIFO_READ_PTR 0x400C /* RX IPP FIFO read
709 pointer. (8-bit counter) */
710
711/* current state of RX DMA state engines + other info
712 * DEFAULT: 0x0
713 */
714#define REG_RX_DEBUG 0x401C /* RX debug */
715#define RX_DEBUG_LOAD_STATE_MASK 0x0000000F /* load state machine w/ MAC:
716 0x0 = idle, 0x1 = load_bop
717 0x2 = load 1, 0x3 = load 2
718 0x4 = load 3, 0x5 = load 4
719 0x6 = last detect
720 0x7 = wait req
721 0x8 = wait req status 1st
722 0x9 = load st
723 0xa = bubble mac
724 0xb = error */
725#define RX_DEBUG_LM_STATE_MASK 0x00000070 /* load state machine w/ HP and
726 RX FIFO:
727 0x0 = idle, 0x1 = hp xfr
728 0x2 = wait hp ready
729 0x3 = wait flow code
730 0x4 = fifo xfer
731 0x5 = make status
732 0x6 = csum ready
733 0x7 = error */
734#define RX_DEBUG_FC_STATE_MASK 0x00000180 /* flow control state machine
735 w/ MAC:
736 0x0 = idle
737 0x1 = wait xoff ack
738 0x2 = wait xon
739 0x3 = wait xon ack */
740#define RX_DEBUG_DATA_STATE_MASK 0x00001E00 /* unload data state machine
741 states:
742 0x0 = idle data
743 0x1 = header begin
744 0x2 = xfer header
745 0x3 = xfer header ld
746 0x4 = mtu begin
747 0x5 = xfer mtu
748 0x6 = xfer mtu ld
749 0x7 = jumbo begin
750 0x8 = xfer jumbo
751 0x9 = xfer jumbo ld
752 0xa = reas begin
753 0xb = xfer reas
754 0xc = flush tag
755 0xd = xfer reas ld
756 0xe = error
757 0xf = bubble idle */
758#define RX_DEBUG_DESC_STATE_MASK 0x0001E000 /* unload desc state machine
759 states:
760 0x0 = idle desc
761 0x1 = wait ack
762 0x9 = wait ack 2
763 0x2 = fetch desc 1
764 0xa = fetch desc 2
765 0x3 = load ptrs
766 0x4 = wait dma
767 0x5 = wait ack batch
768 0x6 = post batch
769 0x7 = xfr done */
770#define RX_DEBUG_INTR_READ_PTR_MASK 0x30000000 /* interrupt read ptr of the
771 interrupt queue */
772#define RX_DEBUG_INTR_WRITE_PTR_MASK 0xC0000000 /* interrupt write pointer
773 of the interrupt queue */
774
775/* flow control frames are emitted using two PAUSE thresholds:
776 * XOFF PAUSE uses pause time value pre-programmed in the Send PAUSE MAC reg
777 * XON PAUSE uses a pause time of 0. granularity of threshold is 64bytes.
778 * PAUSE thresholds defined in terms of FIFO occupancy and may be translated
779 * into FIFO vacancy using RX_FIFO_SIZE. setting ON will trigger XON frames
780 * when FIFO reaches 0. OFF threshold should not be > size of RX FIFO. max
781 * value is 0x6F.
782 * DEFAULT: 0x00078
783 */
784#define REG_RX_PAUSE_THRESH 0x4020 /* RX pause thresholds */
785#define RX_PAUSE_THRESH_QUANTUM 64
786#define RX_PAUSE_THRESH_OFF_MASK 0x000001FF /* XOFF PAUSE emitted when
787 RX FIFO occupancy >
788 value*64B */
789#define RX_PAUSE_THRESH_OFF_SHIFT 0
790#define RX_PAUSE_THRESH_ON_MASK 0x001FF000 /* XON PAUSE emitted after
791 emitting XOFF PAUSE when RX
792 FIFO occupancy falls below
793 this value*64B. must be
794 < XOFF threshold. if =
795 RX_FIFO_SIZE, XON frames are
796 never emitted. */
797#define RX_PAUSE_THRESH_ON_SHIFT 12
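/* Hypothetical sketch (not part of the original header): program the XOFF
 * and XON pause thresholds from byte counts. Both fields are in units of
 * RX_PAUSE_THRESH_QUANTUM (64 bytes) and the XON value must stay below the
 * XOFF value, as the comment above explains. "regs" is an assumed
 * register base; the helper name is made up for illustration.
 */
static inline void cas_rx_pause_thresh_set(void __iomem *regs,
					   unsigned int off_bytes,
					   unsigned int on_bytes)
{
	u32 val;

	val  = ((off_bytes / RX_PAUSE_THRESH_QUANTUM) <<
		RX_PAUSE_THRESH_OFF_SHIFT) & RX_PAUSE_THRESH_OFF_MASK;
	val |= ((on_bytes / RX_PAUSE_THRESH_QUANTUM) <<
		RX_PAUSE_THRESH_ON_SHIFT) & RX_PAUSE_THRESH_ON_MASK;
	writel(val, regs + REG_RX_PAUSE_THRESH);
}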
798
799/* 13-bit register used to control RX desc fetching and intr generation. if 4+
800 * valid RX descriptors are available, Cassini will read 4 at a time.
801 * writing N means that all desc up to *but* excluding N are available. N must
802 * be a multiple of 4 (N % 4 = 0). first desc should be cache-line aligned.
803 * DEFAULT: 0 on reset
804 */
805#define REG_RX_KICK 0x4024 /* RX kick reg */
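/* Hypothetical sketch (not part of the original header): post newly
 * replenished RX free descriptors. As noted above the kick value excludes
 * the index written and must be a multiple of 4, so round down before
 * writing. "regs" is an assumed register base; the helper name is made up
 * for illustration.
 */
static inline void cas_rx_kick(void __iomem *regs, unsigned int next_free)
{
	writel(next_free & ~3U, regs + REG_RX_KICK);
}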
806
807/* 8KB aligned 64-bit pointer to the base of the RX free/completion rings.
808 * lower 13 bits of the low register are hard-wired to 0.
809 */
810#define REG_RX_DB_LOW 0x4028 /* RX descriptor ring
811 base low */
812#define REG_RX_DB_HI 0x402C /* RX descriptor ring
813 base hi */
814#define REG_RX_CB_LOW 0x4030 /* RX completion ring
815 base low */
816#define REG_RX_CB_HI 0x4034 /* RX completion ring
817 base hi */
818/* 13-bit register indicate desc used by cassini for receive frames. used
819 * for diagnostic purposes.
820 * DEFAULT: 0 on reset
821 */
822#define REG_RX_COMP 0x4038 /* (ro) RX completion */
823
824/* HEAD and TAIL are used to control RX desc posting and interrupt
825 * generation. hw moves the head register to pass ownership to sw. sw
826 * moves the tail register to pass ownership back to hw. to give all
827 * entries to hw, set TAIL = HEAD. if HEAD and TAIL indicate that no
828 * more entries are available, DMA will pause and an interrupt will be
829 * generated to indicate no more entries are available. sw can use
830 * this interrupt to reduce the # of times it must update the
831 * completion tail register.
832 * DEFAULT: 0 on reset
833 */
834#define REG_RX_COMP_HEAD 0x403C /* RX completion head */
835#define REG_RX_COMP_TAIL 0x4040 /* RX completion tail */
836
837/* values used for receive interrupt blanking. loaded each time the ISR is read
838 * DEFAULT: 0x00000000
839 */
840#define REG_RX_BLANK 0x4044 /* RX blanking register
841 for ISR read */
842#define RX_BLANK_INTR_PKT_MASK 0x000001FF /* RX_DONE intr asserted if
843 this many sets of completion
844 writebacks (up to 2 packets)
845 occur since the last time
846 the ISR was read. 0 = no
847 packet blanking */
848#define RX_BLANK_INTR_PKT_SHIFT 0
849#define RX_BLANK_INTR_TIME_MASK 0x3FFFF000 /* RX_DONE interrupt asserted
850 if that many clocks were
851 counted since last time the
852 ISR was read.
853 each count is 512 core
854 clocks (125MHz). 0 = no
855 time blanking */
856#define RX_BLANK_INTR_TIME_SHIFT 12
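/* Hypothetical sketch (not part of the original header): build a
 * REG_RX_BLANK value from a completion-writeback count and a time in
 * microseconds. Each time tick is 512 core clocks at 125MHz, i.e. about
 * 4.1us; names are made up for illustration. Assumes <linux/types.h>.
 */
static inline u32 cas_rx_blank_val(unsigned int pkts, unsigned int usecs)
{
	u32 ticks = (usecs * 125) / 512;	/* 125 clocks per us, 512 clocks per tick */

	return ((pkts << RX_BLANK_INTR_PKT_SHIFT) & RX_BLANK_INTR_PKT_MASK) |
	       ((ticks << RX_BLANK_INTR_TIME_SHIFT) & RX_BLANK_INTR_TIME_MASK);
}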
857
858/* values used for interrupt generation based on threshold values of how
859 * many free desc and completion entries are available for hw use.
860 * DEFAULT: 0x00000000
861 */
862#define REG_RX_AE_THRESH 0x4048 /* RX almost empty
863 thresholds */
864#define RX_AE_THRESH_FREE_MASK 0x00001FFF /* RX_BUF_AE will be
865 generated if # desc
866 avail for hw use <=
867 # */
868#define RX_AE_THRESH_FREE_SHIFT 0
869#define RX_AE_THRESH_COMP_MASK 0x0FFFE000 /* RX_COMP_AE will be
870 generated if # of
871 completion entries
872 avail for hw use <=
873 # */
874#define RX_AE_THRESH_COMP_SHIFT 13
875
876/* probabilities for random early drop (RED) thresholds on a FIFO threshold
877 * basis. probability should increase when the FIFO level increases. control
878 * packets are never dropped and not counted in stats. probability programmed
879 * on a 12.5% granularity. e.g., 0x1 = 1/8 packets dropped.
880 * DEFAULT: 0x00000000
881 */
882#define REG_RX_RED 0x404C /* RX random early detect enable */
883#define RX_RED_4K_6K_FIFO_MASK 0x000000FF /* 4KB < FIFO thresh < 6KB */
884#define RX_RED_6K_8K_FIFO_MASK 0x0000FF00 /* 6KB < FIFO thresh < 8KB */
885#define RX_RED_8K_10K_FIFO_MASK 0x00FF0000 /* 8KB < FIFO thresh < 10KB */
886#define RX_RED_10K_12K_FIFO_MASK 0xFF000000 /* 10KB < FIFO thresh < 12KB */
887
888/* FIFO fullness levels for RX FIFO, RX control FIFO, and RX IPP FIFO.
889 * RX control FIFO = # of packets in RX FIFO.
890 * DEFAULT: 0x0
891 */
892#define REG_RX_FIFO_FULLNESS 0x4050 /* (ro) RX FIFO fullness */
893#define RX_FIFO_FULLNESS_RX_FIFO_MASK 0x3FF80000 /* level w/ 8B granularity */
894#define RX_FIFO_FULLNESS_IPP_FIFO_MASK 0x0007FF00 /* level w/ 8B granularity */
895#define RX_FIFO_FULLNESS_RX_PKT_MASK 0x000000FF /* # packets in RX FIFO */
896#define REG_RX_IPP_PACKET_COUNT 0x4054 /* RX IPP packet counter */
897#define REG_RX_WORK_DMA_PTR_LOW 0x4058 /* RX working DMA ptr low */
898#define REG_RX_WORK_DMA_PTR_HI 0x405C /* RX working DMA ptr
899 high */
900
901/* BIST testing of RX FIFO, RX control FIFO, and RX IPP FIFO. only RX BIST
902 * START/COMPLETE is writeable. START will clear when the BIST has completed
903 * checking all 17 RAMS.
904 * DEFAULT: 0bxxxx xxxxx xxxx xxxx xxxx x000 0000 0000 00x0
905 */
906#define REG_RX_BIST 0x4060 /* (ro) RX BIST */
907#define RX_BIST_32A_PASS 0x80000000 /* RX FIFO 32A passed */
908#define RX_BIST_33A_PASS 0x40000000 /* RX FIFO 33A passed */
909#define RX_BIST_32B_PASS 0x20000000 /* RX FIFO 32B passed */
910#define RX_BIST_33B_PASS 0x10000000 /* RX FIFO 33B passed */
911#define RX_BIST_32C_PASS 0x08000000 /* RX FIFO 32C passed */
912#define RX_BIST_33C_PASS 0x04000000 /* RX FIFO 33C passed */
913#define RX_BIST_IPP_32A_PASS 0x02000000 /* RX IPP FIFO 32A passed */
914#define RX_BIST_IPP_33A_PASS 0x01000000 /* RX IPP FIFO 33A passed */
915#define RX_BIST_IPP_32B_PASS 0x00800000 /* RX IPP FIFO 32B passed */
916#define RX_BIST_IPP_33B_PASS 0x00400000 /* RX IPP FIFO 33B passed */
917#define RX_BIST_IPP_32C_PASS 0x00200000 /* RX IPP FIFO 32C passed */
918#define RX_BIST_IPP_33C_PASS 0x00100000 /* RX IPP FIFO 33C passed */
919#define RX_BIST_CTRL_32_PASS 0x00800000 /* RX CTRL FIFO 32 passed */
920#define RX_BIST_CTRL_33_PASS 0x00400000 /* RX CTRL FIFO 33 passed */
921#define RX_BIST_REAS_26A_PASS 0x00200000 /* RX Reas 26A passed */
922#define RX_BIST_REAS_26B_PASS 0x00100000 /* RX Reas 26B passed */
923#define RX_BIST_REAS_27_PASS 0x00080000 /* RX Reas 27 passed */
924#define RX_BIST_STATE_MASK 0x00078000 /* BIST state machine */
925#define RX_BIST_SUMMARY 0x00000002 /* when BIST complete,
926 summary pass bit
927 contains AND of BIST
928 results of all 16
929 RAMS */
930#define RX_BIST_START 0x00000001 /* write 1 to start
931 BIST. self clears
932 on completion. */
933
934/* next location in RX CTRL FIFO that will be loaded w/ data from RX IPP/read
935 * from to retrieve packet control info.
936 * DEFAULT: 0
937 */
938#define REG_RX_CTRL_FIFO_WRITE_PTR 0x4064 /* (ro) RX control FIFO
939 write ptr */
940#define REG_RX_CTRL_FIFO_READ_PTR 0x4068 /* (ro) RX control FIFO read
941 ptr */
942
943/* receive interrupt blanking. loaded each time interrupt alias register is
944 * read.
945 * DEFAULT: 0x0
946 */
947#define REG_RX_BLANK_ALIAS_READ 0x406C /* RX blanking register for
948 alias read */
949#define RX_BAR_INTR_PACKET_MASK 0x000001FF /* assert RX_DONE if #
950 completion writebacks
951 > # since last ISR
952 read. 0 = no
953 blanking. up to 2
954 packets per
955 completion wb. */
956#define RX_BAR_INTR_TIME_MASK 0x3FFFF000 /* assert RX_DONE if #
957 clocks > # since last
958 ISR read. each count
959 is 512 core clocks
960 (125MHz). 0 = no
961 blanking. */
962
963/* diagnostic access to RX FIFO. 32 LSB accessed via DATA_LOW. 32 MSB accessed
964 * via DATA_HI_T0 or DATA_HI_T1. TAG reads the tag bit. writing HI_T0
965 * will unset the tag bit while writing HI_T1 will set the tag bit. to reset
966 * to normal operation after diagnostics, write to address location 0x0.
967 * RX_DMA_EN bit must be set to 0x0 for RX FIFO PIO access. DATA_HI should
968 * be the last write access of a write sequence.
969 * DEFAULT: undefined
970 */
971#define REG_RX_FIFO_ADDR 0x4080 /* RX FIFO address */
972#define REG_RX_FIFO_TAG 0x4084 /* RX FIFO tag */
973#define REG_RX_FIFO_DATA_LOW 0x4088 /* RX FIFO data low */
974#define REG_RX_FIFO_DATA_HI_T0 0x408C /* RX FIFO data high T0 */
975#define REG_RX_FIFO_DATA_HI_T1 0x4090 /* RX FIFO data high T1 */
976
977/* diagnostic access to RX CTRL FIFO. 8-bit FIFO_ADDR holds address of
978 * 81 bit control entry and 6 bit flow id. LOW and MID are both 32-bit
979 * accesses. HI is 7-bits with 6-bit flow id and 1 bit control
980 * word. RX_DMA_EN must be 0 for RX CTRL FIFO PIO access. DATA_HI
981 * should be last write access of the write sequence.
982 * DEFAULT: undefined
983 */
984#define REG_RX_CTRL_FIFO_ADDR 0x4094 /* RX Control FIFO and
985 Batching FIFO addr */
986#define REG_RX_CTRL_FIFO_DATA_LOW 0x4098 /* RX Control FIFO data
987 low */
988#define REG_RX_CTRL_FIFO_DATA_MID 0x409C /* RX Control FIFO data
989 mid */
990#define REG_RX_CTRL_FIFO_DATA_HI 0x4100 /* RX Control FIFO data
991 hi and flow id */
992#define RX_CTRL_FIFO_DATA_HI_CTRL 0x0001 /* upper bit of ctrl word */
993#define RX_CTRL_FIFO_DATA_HI_FLOW_MASK 0x007E /* flow id */
994
995/* diagnostic access to RX IPP FIFO. same semantics as RX_FIFO.
996 * DEFAULT: undefined
997 */
998#define REG_RX_IPP_FIFO_ADDR 0x4104 /* RX IPP FIFO address */
999#define REG_RX_IPP_FIFO_TAG 0x4108 /* RX IPP FIFO tag */
1000#define REG_RX_IPP_FIFO_DATA_LOW 0x410C /* RX IPP FIFO data low */
1001#define REG_RX_IPP_FIFO_DATA_HI_T0 0x4110 /* RX IPP FIFO data high
1002 T0 */
1003#define REG_RX_IPP_FIFO_DATA_HI_T1 0x4114 /* RX IPP FIFO data high
1004 T1 */
1005
1006/* 64-bit pointer to receive data buffer in host memory used for headers and
1007 * small packets. MSB in high register. loaded by DMA state machine and
1008 * increments as DMA writes receive data. only 50 LSB are incremented. top
1009 * 13 bits taken from RX descriptor.
1010 * DEFAULT: undefined
1011 */
1012#define REG_RX_HEADER_PAGE_PTR_LOW 0x4118 /* (ro) RX header page ptr
1013 low */
1014#define REG_RX_HEADER_PAGE_PTR_HI 0x411C /* (ro) RX header page ptr
1015 high */
1016#define REG_RX_MTU_PAGE_PTR_LOW 0x4120 /* (ro) RX MTU page pointer
1017 low */
1018#define REG_RX_MTU_PAGE_PTR_HI 0x4124 /* (ro) RX MTU page pointer
1019 high */
1020
1021/* PIO diagnostic access to RX reassembly DMA Table RAM. 6-bit register holds
1022 * one of 64 79-bit locations in the RX Reassembly DMA table and the addr of
1023 * one of the 64 byte locations in the Batching table. LOW holds 32 LSB.
1024 * MID holds the next 32 LSB. HIGH holds the 15 MSB. RX_DMA_EN must be set
1025 * to 0 for PIO access. DATA_HIGH should be last write of write sequence.
1026 * layout:
1027 * reassembly ptr [78:15] | reassembly index [14:1] | reassembly entry valid [0]
1028 * DEFAULT: undefined
1029 */
1030#define REG_RX_TABLE_ADDR 0x4128 /* RX reassembly DMA table
1031 address */
1032#define RX_TABLE_ADDR_MASK 0x0000003F /* address mask */
1033
1034#define REG_RX_TABLE_DATA_LOW 0x412C /* RX reassembly DMA table
1035 data low */
1036#define REG_RX_TABLE_DATA_MID 0x4130 /* RX reassembly DMA table
1037 data mid */
1038#define REG_RX_TABLE_DATA_HI 0x4134 /* RX reassembly DMA table
1039 data high */
1040
1041/* cassini+ only */
1042/* 8KB aligned 64-bit pointer to base of RX rings. lower 13 bits hardwired to
1043 * 0. same semantics as primary desc/complete rings.
1044 */
1045#define REG_PLUS_RX_DB1_LOW 0x4200 /* RX descriptor ring
1046 2 base low */
1047#define REG_PLUS_RX_DB1_HI 0x4204 /* RX descriptor ring
1048 2 base high */
1049#define REG_PLUS_RX_CB1_LOW 0x4208 /* RX completion ring
1050 2 base low. 4 total */
1051#define REG_PLUS_RX_CB1_HI 0x420C /* RX completion ring
1052 2 base high. 4 total */
1053#define REG_PLUS_RX_CBN_LOW(x) (REG_PLUS_RX_CB1_LOW + 8*((x) - 1))
1054#define REG_PLUS_RX_CBN_HI(x) (REG_PLUS_RX_CB1_HI + 8*((x) - 1))
1055#define REG_PLUS_RX_KICK1 0x4220 /* RX Kick 2 register */
1056#define REG_PLUS_RX_COMP1 0x4224 /* (ro) RX completion 2
1057 reg */
1058#define REG_PLUS_RX_COMP1_HEAD 0x4228 /* (ro) RX completion 2
1059 head reg. 4 total. */
1060#define REG_PLUS_RX_COMP1_TAIL 0x422C /* RX completion 2
1061 tail reg. 4 total. */
1062#define REG_PLUS_RX_COMPN_HEAD(x) (REG_PLUS_RX_COMP1_HEAD + 8*((x) - 1))
1063#define REG_PLUS_RX_COMPN_TAIL(x) (REG_PLUS_RX_COMP1_TAIL + 8*((x) - 1))
1064#define REG_PLUS_RX_AE1_THRESH 0x4240 /* RX almost empty 2
1065 thresholds */
1066#define RX_AE1_THRESH_FREE_MASK RX_AE_THRESH_FREE_MASK
1067#define RX_AE1_THRESH_FREE_SHIFT RX_AE_THRESH_FREE_SHIFT
1068
1069/** header parser registers **/
1070
1071/* RX parser configuration register.
1072 * DEFAULT: 0x1651004
1073 */
1074#define REG_HP_CFG 0x4140 /* header parser
1075 configuration reg */
1076#define HP_CFG_PARSE_EN 0x00000001 /* enab header parsing */
1077#define HP_CFG_NUM_CPU_MASK 0x000000FC /* # processors
1078 0 = 64. 0x3f = 63 */
1079#define HP_CFG_NUM_CPU_SHIFT 2
1080#define HP_CFG_SYN_INC_MASK 0x00000100 /* SYN bit won't increment
1081 TCP seq # by one when
1082 stored in FDBM */
1083#define HP_CFG_TCP_THRESH_MASK 0x000FFE00 /* # bytes of TCP data
1084 needed to be considered
1085 for reassembly */
1086#define HP_CFG_TCP_THRESH_SHIFT 9
1087
1088/* access to RX Instruction RAM. 5-bit register/counter holds addr
1089 * of 39 bit entry to be read/written. 32 LSB in _DATA_LOW. 7 MSB in _DATA_HI.
1090 * RX_DMA_EN must be 0 for RX instr PIO access. DATA_HI should be last access
1091 * of sequence.
1092 * DEFAULT: undefined
1093 */
1094#define REG_HP_INSTR_RAM_ADDR 0x4144 /* HP instruction RAM
1095 address */
1096#define HP_INSTR_RAM_ADDR_MASK 0x01F /* 5-bit mask */
1097#define REG_HP_INSTR_RAM_DATA_LOW 0x4148 /* HP instruction RAM
1098 data low */
1099#define HP_INSTR_RAM_LOW_OUTMASK_MASK 0x0000FFFF
1100#define HP_INSTR_RAM_LOW_OUTMASK_SHIFT 0
1101#define HP_INSTR_RAM_LOW_OUTSHIFT_MASK 0x000F0000
1102#define HP_INSTR_RAM_LOW_OUTSHIFT_SHIFT 16
1103#define HP_INSTR_RAM_LOW_OUTEN_MASK 0x00300000
1104#define HP_INSTR_RAM_LOW_OUTEN_SHIFT 20
1105#define HP_INSTR_RAM_LOW_OUTARG_MASK 0xFFC00000
1106#define HP_INSTR_RAM_LOW_OUTARG_SHIFT 22
1107#define REG_HP_INSTR_RAM_DATA_MID 0x414C /* HP instruction RAM
1108 data mid */
1109#define HP_INSTR_RAM_MID_OUTARG_MASK 0x00000003
1110#define HP_INSTR_RAM_MID_OUTARG_SHIFT 0
1111#define HP_INSTR_RAM_MID_OUTOP_MASK 0x0000003C
1112#define HP_INSTR_RAM_MID_OUTOP_SHIFT 2
1113#define HP_INSTR_RAM_MID_FNEXT_MASK 0x000007C0
1114#define HP_INSTR_RAM_MID_FNEXT_SHIFT 6
1115#define HP_INSTR_RAM_MID_FOFF_MASK 0x0003F800
1116#define HP_INSTR_RAM_MID_FOFF_SHIFT 11
1117#define HP_INSTR_RAM_MID_SNEXT_MASK 0x007C0000
1118#define HP_INSTR_RAM_MID_SNEXT_SHIFT 18
1119#define HP_INSTR_RAM_MID_SOFF_MASK 0x3F800000
1120#define HP_INSTR_RAM_MID_SOFF_SHIFT 23
1121#define HP_INSTR_RAM_MID_OP_MASK 0xC0000000
1122#define HP_INSTR_RAM_MID_OP_SHIFT 30
1123#define REG_HP_INSTR_RAM_DATA_HI 0x4150 /* HP instruction RAM
1124 data high */
1125#define HP_INSTR_RAM_HI_VAL_MASK 0x0000FFFF
1126#define HP_INSTR_RAM_HI_VAL_SHIFT 0
1127#define HP_INSTR_RAM_HI_MASK_MASK 0xFFFF0000
1128#define HP_INSTR_RAM_HI_MASK_SHIFT 16
1129
1130/* PIO access into RX Header parser data RAM and flow database.
1131 * 11-bit register. Data fills the LSB portion of bus if less than 32 bits.
1132 * DATA_RAM: write RAM_FDB_DATA with index to access DATA_RAM.
1133 * RAM bytes = 4*(x - 1) + [3:0]. e.g., 0 -> [3:0], 31 -> [123:120]
1134 * FLOWDB: write DATA_RAM_FDB register and then read/write FDB1-12 to access
1135 * flow database.
1136 * RX_DMA_EN must be 0 for RX parser RAM PIO access. RX Parser RAM data reg
1137 * should be the last write access of the write sequence.
1138 * DEFAULT: undefined
1139 */
1140#define REG_HP_DATA_RAM_FDB_ADDR 0x4154 /* HP data and FDB
1141 RAM address */
1142#define HP_DATA_RAM_FDB_DATA_MASK 0x001F /* select 1 of 86 byte
1143 locations in header
1144 parser data ram to
1145 read/write */
1146#define HP_DATA_RAM_FDB_FDB_MASK 0x3F00 /* 1 of 64 353-bit locations
1147 in the flow database */
1148#define REG_HP_DATA_RAM_DATA 0x4158 /* HP data RAM data */
1149
1150/* HP flow database registers: 1 - 12, 0x415C - 0x4188, 4 8-bit bytes
1151 * FLOW_DB(1) = IP_SA[127:96], FLOW_DB(2) = IP_SA[95:64]
1152 * FLOW_DB(3) = IP_SA[63:32], FLOW_DB(4) = IP_SA[31:0]
1153 * FLOW_DB(5) = IP_DA[127:96], FLOW_DB(6) = IP_DA[95:64]
1154 * FLOW_DB(7) = IP_DA[63:32], FLOW_DB(8) = IP_DA[31:0]
1155 * FLOW_DB(9) = {TCP_SP[15:0],TCP_DP[15:0]}
1156 * FLOW_DB(10) = bit 0 has value for flow valid
1157 * FLOW_DB(11) = TCP_SEQ[63:32], FLOW_DB(12) = TCP_SEQ[31:0]
1158 */
1159#define REG_HP_FLOW_DB0 0x415C /* HP flow database 1 reg */
1160#define REG_HP_FLOW_DBN(x) (REG_HP_FLOW_DB0 + (x)*4)
1161
1162/* diagnostics for RX Header Parser block.
1163 * ASUN: the header parser state machine register is used for diagnostics
1164 * purposes. however, the spec doesn't have any details on it.
1165 */
1166#define REG_HP_STATE_MACHINE 0x418C /* (ro) HP state machine */
1167#define REG_HP_STATUS0 0x4190 /* (ro) HP status 1 */
1168#define HP_STATUS0_SAP_MASK 0xFFFF0000 /* SAP */
1169#define HP_STATUS0_L3_OFF_MASK 0x0000FE00 /* L3 offset */
1170#define HP_STATUS0_LB_CPUNUM_MASK 0x000001F8 /* load balancing CPU
1171 number */
1172#define HP_STATUS0_HRP_OPCODE_MASK 0x00000007 /* HRP opcode */
1173
1174#define REG_HP_STATUS1 0x4194 /* (ro) HP status 2 */
1175#define HP_STATUS1_ACCUR2_MASK 0xE0000000 /* accu R2[6:4] */
1176#define HP_STATUS1_FLOWID_MASK 0x1F800000 /* flow id */
1177#define HP_STATUS1_TCP_OFF_MASK 0x007F0000 /* tcp payload offset */
1178#define HP_STATUS1_TCP_SIZE_MASK 0x0000FFFF /* tcp payload size */
1179
1180#define REG_HP_STATUS2 0x4198 /* (ro) HP status 3 */
1181#define HP_STATUS2_ACCUR2_MASK 0xF0000000 /* accu R2[3:0] */
1182#define HP_STATUS2_CSUM_OFF_MASK 0x07F00000 /* checksum start
1183 offset */
1184#define HP_STATUS2_ACCUR1_MASK 0x000FE000 /* accu R1 */
1185#define HP_STATUS2_FORCE_DROP 0x00001000 /* force drop */
1186#define HP_STATUS2_BWO_REASSM 0x00000800 /* batching w/o
1187 reassembly */
1188#define HP_STATUS2_JH_SPLIT_EN 0x00000400 /* jumbo header split
1189 enable */
1190#define HP_STATUS2_FORCE_TCP_NOCHECK 0x00000200 /* force tcp no payload
1191 check */
1192#define HP_STATUS2_DATA_MASK_ZERO 0x00000100 /* mask of data length
1193 equal to zero */
1194#define HP_STATUS2_FORCE_TCP_CHECK 0x00000080 /* force tcp payload
1195 chk */
1196#define HP_STATUS2_MASK_TCP_THRESH 0x00000040 /* mask of payload
1197 threshold */
1198#define HP_STATUS2_NO_ASSIST 0x00000020 /* no assist */
1199#define HP_STATUS2_CTRL_PACKET_FLAG 0x00000010 /* control packet flag */
1200#define HP_STATUS2_TCP_FLAG_CHECK 0x00000008 /* tcp flag check */
1201#define HP_STATUS2_SYN_FLAG 0x00000004 /* syn flag */
1202#define HP_STATUS2_TCP_CHECK 0x00000002 /* tcp payload chk */
1203#define HP_STATUS2_TCP_NOCHECK 0x00000001 /* tcp no payload chk */
1204
1205/* BIST for header parser(HP) and flow database memories (FDBM). set _START
1206 * to start BIST. controller clears _START on completion. _START can also
1207 * be cleared to force termination of BIST. a bit set indicates that that
1208 * memory passed its BIST.
1209 */
1210#define REG_HP_RAM_BIST 0x419C /* HP RAM BIST reg */
1211#define HP_RAM_BIST_HP_DATA_PASS 0x80000000 /* HP data ram */
1212#define HP_RAM_BIST_HP_INSTR0_PASS 0x40000000 /* HP instr ram 0 */
1213#define HP_RAM_BIST_HP_INSTR1_PASS 0x20000000 /* HP instr ram 1 */
1214#define HP_RAM_BIST_HP_INSTR2_PASS 0x10000000 /* HP instr ram 2 */
1215#define HP_RAM_BIST_FDBM_AGE0_PASS 0x08000000 /* FDBM aging RAM0 */
1216#define HP_RAM_BIST_FDBM_AGE1_PASS 0x04000000 /* FDBM aging RAM1 */
1217#define HP_RAM_BIST_FDBM_FLOWID00_PASS 0x02000000 /* FDBM flowid RAM0
1218 bank 0 */
1219#define HP_RAM_BIST_FDBM_FLOWID10_PASS 0x01000000 /* FDBM flowid RAM1
1220 bank 0 */
1221#define HP_RAM_BIST_FDBM_FLOWID20_PASS 0x00800000 /* FDBM flowid RAM2
1222 bank 0 */
1223#define HP_RAM_BIST_FDBM_FLOWID30_PASS 0x00400000 /* FDBM flowid RAM3
1224 bank 0 */
1225#define HP_RAM_BIST_FDBM_FLOWID01_PASS 0x00200000 /* FDBM flowid RAM0
1226 bank 1 */
1227#define HP_RAM_BIST_FDBM_FLOWID11_PASS 0x00100000 /* FDBM flowid RAM1
1228 bank 1 */
1229#define HP_RAM_BIST_FDBM_FLOWID21_PASS 0x00080000 /* FDBM flowid RAM2
1230 bank 1 */
1231#define HP_RAM_BIST_FDBM_FLOWID31_PASS 0x00040000 /* FDBM flowid RAM3
1232 bank 1 */
1233#define HP_RAM_BIST_FDBM_TCPSEQ_PASS 0x00020000 /* FDBM tcp sequence
1234 RAM */
1235#define HP_RAM_BIST_SUMMARY 0x00000002 /* all BIST tests */
1236#define HP_RAM_BIST_START 0x00000001 /* start/stop BIST */
1237
1238
1239/** MAC registers. **/
1240/* reset bits are set using a PIO write and self-cleared after the command
1241 * execution has completed.
1242 */
1243#define REG_MAC_TX_RESET 0x6000 /* TX MAC software reset
1244 command (default: 0x0) */
1245#define REG_MAC_RX_RESET 0x6004 /* RX MAC software reset
1246 command (default: 0x0) */
1247/* execute a pause flow control frame transmission
1248 DEFAULT: 0x0XXXX */
1249#define REG_MAC_SEND_PAUSE 0x6008 /* send pause command reg */
1250#define MAC_SEND_PAUSE_TIME_MASK 0x0000FFFF /* value of pause time
1251 to be sent on network
1252 in units of slot
1253 times */
1254#define MAC_SEND_PAUSE_SEND 0x00010000 /* send pause flow ctrl
1255 frame on network */
1256
1257/* bit set indicates that event occurred. auto-cleared when status register
1258 * is read and have corresponding mask bits in mask register. events will
1259 * trigger an interrupt if the corresponding mask bit is 0.
1260 * status register default: 0x00000000
1261 * mask register default = 0xFFFFFFFF on reset
1262 */
1263#define REG_MAC_TX_STATUS 0x6010 /* TX MAC status reg */
1264#define MAC_TX_FRAME_XMIT 0x0001 /* successful frame
1265 transmission */
1266#define MAC_TX_UNDERRUN 0x0002 /* terminated frame
1267 transmission due to
1268 data starvation in the
1269 xmit data path */
1270#define MAC_TX_MAX_PACKET_ERR 0x0004 /* frame exceeds max allowed
1271 length passed to TX MAC
1272 by the DMA engine */
1273#define MAC_TX_COLL_NORMAL 0x0008 /* rollover of the normal
1274 collision counter */
1275#define MAC_TX_COLL_EXCESS 0x0010 /* rollover of the excessive
1276 collision counter */
1277#define MAC_TX_COLL_LATE 0x0020 /* rollover of the late
1278 collision counter */
1279#define MAC_TX_COLL_FIRST 0x0040 /* rollover of the first
1280 collision counter */
1281#define MAC_TX_DEFER_TIMER 0x0080 /* rollover of the defer
1282 timer */
1283#define MAC_TX_PEAK_ATTEMPTS 0x0100 /* rollover of the peak
1284 attempts counter */
1285
1286#define REG_MAC_RX_STATUS 0x6014 /* RX MAC status reg */
1287#define MAC_RX_FRAME_RECV 0x0001 /* successful receipt of
1288 a frame */
1289#define MAC_RX_OVERFLOW 0x0002 /* dropped frame due to
1290 RX FIFO overflow */
1291#define MAC_RX_FRAME_COUNT 0x0004 /* rollover of receive frame
1292 counter */
1293#define MAC_RX_ALIGN_ERR 0x0008 /* rollover of alignment
1294 error counter */
1295#define MAC_RX_CRC_ERR 0x0010 /* rollover of crc error
1296 counter */
1297#define MAC_RX_LEN_ERR 0x0020 /* rollover of length
1298 error counter */
1299#define MAC_RX_VIOL_ERR 0x0040 /* rollover of code
1300 violation error */
1301
1302/* DEFAULT: 0xXXXX0000 on reset */
1303#define REG_MAC_CTRL_STATUS 0x6018 /* MAC control status reg */
1304#define MAC_CTRL_PAUSE_RECEIVED 0x00000001 /* successful
1305 reception of a
1306 pause control
1307 frame */
1308#define MAC_CTRL_PAUSE_STATE 0x00000002 /* MAC has made a
1309 transition from
1310 "not paused" to
1311 "paused" */
1312#define MAC_CTRL_NOPAUSE_STATE 0x00000004 /* MAC has made a
1313 transition from
1314 "paused" to "not
1315 paused" */
1316#define MAC_CTRL_PAUSE_TIME_MASK 0xFFFF0000 /* value of pause time
1317 operand that was
1318 received in the last
1319 pause flow control
1320 frame */
1321
1322/* layout identical to TX MAC[8:0] */
1323#define REG_MAC_TX_MASK 0x6020 /* TX MAC mask reg */
1324/* layout identical to RX MAC[6:0] */
1325#define REG_MAC_RX_MASK 0x6024 /* RX MAC mask reg */
1326/* layout identical to CTRL MAC[2:0] */
1327#define REG_MAC_CTRL_MASK 0x6028 /* MAC control mask reg */
1328
1329/* to ensure proper operation, CFG_EN must be cleared to 0 and a delay
1330 * imposed before writes to other bits in the TX_MAC_CFG register or any of
1331 * the MAC parameters is performed. delay dependent upon time required to
1332 * transmit a maximum size frame (= MAC_FRAMESIZE_MAX*8/Mbps). e.g.,
1333 * the delay for a 1518-byte frame on a 100Mbps network is 125us.
1334 * alternatively, just poll TX_CFG_EN until it reads back as 0.
1335 * NOTE: on half-duplex 1Gbps, TX_CFG_CARRIER_EXTEND and
1336 * RX_CFG_CARRIER_EXTEND should be set and the SLOT_TIME register should
1337 * be 0x200 (slot time of 512 bytes)
1338 */
1339#define REG_MAC_TX_CFG 0x6030 /* TX MAC config reg */
1340#define MAC_TX_CFG_EN 0x0001 /* enable TX MAC. 0 will
1341 force TXMAC state
1342 machine to remain in
1343 idle state or to
1344 transition to idle state
1345 on completion of an
1346 ongoing packet. */
1347#define MAC_TX_CFG_IGNORE_CARRIER 0x0002 /* disable CSMA/CD deferral
1348 process. set to 1 when
1349 full duplex and 0 when
1350 half duplex */
1351#define MAC_TX_CFG_IGNORE_COLL 0x0004 /* disable CSMA/CD backoff
1352 algorithm. set to 1 when
1353 full duplex and 0 when
1354 half duplex */
1355#define MAC_TX_CFG_IPG_EN 0x0008 /* enable extension of the
1356 Rx-to-TX IPG. after
1357 receiving a frame, TX
1358 MAC will reset its
1359 deferral process to
1360 carrier sense for the
1361 amount of time = IPG0 +
1362 IPG1 and commit to
1363 transmission for time
1364 specified in IPG2. when
1365 0 or when xmitting frames
1366 back-to-back (Tx-to-Tx
1367 IPG), TX MAC ignores
1368 IPG0 and will only use
1369 IPG1 for deferral time.
1370 IPG2 still used. */
1371#define MAC_TX_CFG_NEVER_GIVE_UP_EN 0x0010 /* TX MAC will not easily
1372 give up on frame
1373 xmission. if backoff
1374 algorithm reaches the
1375 ATTEMPT_LIMIT, it will
1376 clear attempts counter
1377 and continue trying to
1378 send the frame as
1379 specified by
1380 GIVE_UP_LIM. when 0,
1381 TX MAC will execute
1382 standard CSMA/CD prot. */
1383#define MAC_TX_CFG_NEVER_GIVE_UP_LIM 0x0020 /* when set, TX MAC will
1384 continue to try to xmit
1385 until successful. when
1386 0, TX MAC will continue
1387 to try xmitting until
1388 successful or backoff
1389 algorithm reaches
1390 ATTEMPT_LIMIT*16 */
1391#define MAC_TX_CFG_NO_BACKOFF 0x0040 /* modify CSMA/CD to disable
1392 backoff algorithm. TX
1393 MAC will not back off
1394 after a xmission attempt
1395 that resulted in a
1396 collision. */
1397#define MAC_TX_CFG_SLOW_DOWN 0x0080 /* modify CSMA/CD so that
1398 deferral process is reset
1399 in response to carrier
1400 sense during the entire
1401 duration of IPG. TX MAC
1402 will only commit to frame
1403 xmission after frame
1404 xmission has actually
1405 begun. */
1406#define MAC_TX_CFG_NO_FCS 0x0100 /* TX MAC will not generate
1407 CRC for all xmitted
1408 packets. when clear, CRC
1409 generation is dependent
1410 upon NO_CRC bit in the
1411 xmit control word from
1412 TX DMA */
1413#define MAC_TX_CFG_CARRIER_EXTEND 0x0200 /* enables xmit part of the
1414 carrier extension
1415 feature. this allows for
1416 longer collision domains
1417 by extending the carrier
1418 and collision window
1419 from the end of FCS until
1420 the end of the slot time
1421 if necessary. Required
1422 for half-duplex at 1Gbps,
1423 clear otherwise. */
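/* Hypothetical sketch (not part of the original header): disable the TX MAC
 * before touching other TX MAC parameters, using the "poll until EN reads
 * back 0" alternative described above the REG_MAC_TX_CFG definition.
 * Assumes <linux/io.h> and <linux/delay.h>; "regs" is an assumed register
 * base and the helper name and retry limit are made up for illustration.
 */
static inline int cas_tx_mac_disable(void __iomem *regs)
{
	int limit = 100;	/* arbitrary bound, ~1ms total */

	writel(readl(regs + REG_MAC_TX_CFG) & ~MAC_TX_CFG_EN,
	       regs + REG_MAC_TX_CFG);
	while (limit--) {
		if (!(readl(regs + REG_MAC_TX_CFG) & MAC_TX_CFG_EN))
			return 0;	/* TX MAC is now idle */
		udelay(10);
	}
	return -1;	/* still busy after the timeout */
}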
1424
1425/* when CRC is not stripped, reassembly packets will not contain the CRC.
1426 * these will be stripped by HRP because it reassembles layer 4 data, and the
1427 * CRC is layer 2. however, non-reassembly packets will still contain the CRC
1428 * when passed to the host. to ensure proper operation, need to wait 3.2ms
1429 * after clearing RX_CFG_EN before writing to any other RX MAC registers
1430 * or other MAC parameters. alternatively, poll RX_CFG_EN until it clears
1431 * to 0. similarly, HASH_FILTER_EN and ADDR_FILTER_EN have the same
1432 * restrictions as CFG_EN.
1433 */
1434#define REG_MAC_RX_CFG 0x6034 /* RX MAC config reg */
1435#define MAC_RX_CFG_EN 0x0001 /* enable RX MAC */
1436#define MAC_RX_CFG_STRIP_PAD 0x0002 /* always program to 0.
1437 feature not supported */
1438#define MAC_RX_CFG_STRIP_FCS 0x0004 /* RX MAC will strip the
1439 last 4 bytes of a
1440 received frame. */
1441#define MAC_RX_CFG_PROMISC_EN 0x0008 /* promiscuous mode */
1442#define MAC_RX_CFG_PROMISC_GROUP_EN 0x0010 /* accept all valid
1443 multicast frames (group
1444 bit in DA field set) */
1445#define MAC_RX_CFG_HASH_FILTER_EN 0x0020 /* use hash table to filter
1446 multicast addresses */
1447#define MAC_RX_CFG_ADDR_FILTER_EN 0x0040 /* cause RX MAC to use
1448 address filtering regs
1449 to filter both unicast
1450 and multicast
1451 addresses */
1452#define MAC_RX_CFG_DISABLE_DISCARD 0x0080 /* pass errored frames to
1453 RX DMA by setting BAD
1454 bit but not Abort bit
1455 in the status. CRC,
1456 framing, and length errs
1457 will not increment
1458 error counters. frames
1459 which don't match dest
1460 addr will be passed up
1461 w/ BAD bit set. */
1462#define MAC_RX_CFG_CARRIER_EXTEND 0x0100 /* enable reception of
1463 packet bursts generated
1464 by carrier extension
1465 with packet bursting
1466 senders. only applies
1467 to half-duplex 1Gbps */
1468
1469/* DEFAULT: 0x0 */
1470#define REG_MAC_CTRL_CFG 0x6038 /* MAC control config reg */
1471#define MAC_CTRL_CFG_SEND_PAUSE_EN 0x0001 /* respond to requests for
1472 sending pause flow ctrl
1473 frames */
1474#define MAC_CTRL_CFG_RECV_PAUSE_EN 0x0002 /* respond to received
1475 pause flow ctrl frames */
1476#define MAC_CTRL_CFG_PASS_CTRL 0x0004 /* pass valid MAC ctrl
1477 packets to RX DMA */
1478
1479/* to ensure proper operation, a global initialization sequence should be
1480 * performed when a loopback config is entered or exited. if programmed after
1481 * a hw or global sw reset, RX/TX MAC software reset and initialization
1482 * should be done to ensure stable clocking.
1483 * DEFAULT: 0x0
1484 */
1485#define REG_MAC_XIF_CFG 0x603C /* XIF config reg */
1486#define MAC_XIF_TX_MII_OUTPUT_EN 0x0001 /* enable output drivers
1487 on MII xmit bus */
1488#define MAC_XIF_MII_INT_LOOPBACK 0x0002 /* loopback GMII xmit data
1489 path to GMII recv data
1490 path. phy mode register
1491 clock selection must be
1492 set to GMII mode and
1493 GMII_MODE should be set
1494 to 1. in loopback mode,
1495 REFCLK will drive the
1496 entire mac core. 0 for
1497 normal operation. */
1498#define MAC_XIF_DISABLE_ECHO 0x0004 /* disables receive data
1499 path during packet
1500 xmission. clear to 0
1501 in any full duplex mode,
1502 in any loopback mode,
1503 or in half-duplex SERDES
1504 or SLINK modes. set when
1505 in half-duplex when
1506 using external phy. */
1507#define MAC_XIF_GMII_MODE 0x0008 /* MAC operates with GMII
1508 clocks and datapath */
1509#define MAC_XIF_MII_BUFFER_OUTPUT_EN 0x0010 /* MII_BUF_EN pin. enable
1510 external tristate buffer
1511 on the MII receive
1512 bus. */
1513#define MAC_XIF_LINK_LED 0x0020 /* LINKLED# active (low) */
1514#define MAC_XIF_FDPLX_LED 0x0040 /* FDPLXLED# active (low) */
1515
1516#define REG_MAC_IPG0 0x6040 /* inter-packet gap0 reg.
1517 recommended: 0x00 */
1518#define REG_MAC_IPG1 0x6044 /* inter-packet gap1 reg
1519 recommended: 0x08 */
1520#define REG_MAC_IPG2 0x6048 /* inter-packet gap2 reg
1521 recommended: 0x04 */
1522#define REG_MAC_SLOT_TIME 0x604C /* slot time reg
1523 recommended: 0x40 */
1524#define REG_MAC_FRAMESIZE_MIN 0x6050 /* min frame size reg
1525 recommended: 0x40 */
1526
1527/* FRAMESIZE_MAX holds both the max frame size as well as the max burst size.
1528 * recommended value: 0x2000.05EE
1529 */
1530#define REG_MAC_FRAMESIZE_MAX 0x6054 /* max frame size reg */
1531#define MAC_FRAMESIZE_MAX_BURST_MASK 0x3FFF0000 /* max burst size */
1532#define MAC_FRAMESIZE_MAX_BURST_SHIFT 16
1533#define MAC_FRAMESIZE_MAX_FRAME_MASK 0x00007FFF /* max frame size */
1534#define MAC_FRAMESIZE_MAX_FRAME_SHIFT 0
1535#define REG_MAC_PA_SIZE 0x6058 /* PA size reg. number of
1536 preamble bytes that the
1537 TX MAC will xmit at the
1538 beginning of each frame
1539 value should be 2 or
1540 greater. recommended
1541 value: 0x07 */
1542#define REG_MAC_JAM_SIZE 0x605C /* jam size reg. duration
1543 of jam in units of media
1544 byte time. recommended
1545 value: 0x04 */
1546#define REG_MAC_ATTEMPT_LIMIT 0x6060 /* attempt limit reg. #
1547 of attempts TX MAC will
1548 make to xmit a frame
1549 before it resets its
1550 attempts counter. after
1551 the limit has been
1552 reached, TX MAC may or
1553 may not drop the frame
1554 dependent upon value
1555 in TX_MAC_CFG.
1556 recommended
1557 value: 0x10 */
1558#define REG_MAC_CTRL_TYPE 0x6064 /* MAC control type reg.
1559 type field of a MAC
1560 ctrl frame. recommended
1561 value: 0x8808 */
1562
1563/* mac address registers: 0 - 44, 0x6080 - 0x6130, 4 8-bit bytes.
1564 * register contains comparison
1565 * 0 16 MSB of primary MAC addr [47:32] of DA field
1566 * 1 16 middle bits "" [31:16] of DA field
1567 * 2 16 LSB "" [15:0] of DA field
1568 * 3*x 16MSB of alt MAC addr 1-15 [47:32] of DA field
1569 * 4*x 16 middle bits "" [31:16]
1570 * 5*x 16 LSB "" [15:0]
1571 * 42 16 MSB of MAC CTRL addr [47:32] of DA.
1572 * 43 16 middle bits "" [31:16]
1573 * 44 16 LSB "" [15:0]
1574 * MAC CTRL addr must be the reserved multicast addr for MAC CTRL frames.
1575 * if there is a match, MAC will set the bit for alternative address
1576 * filter pass [15]
1577
1578 * here is the map of registers given MAC address notation: a:b:c:d:e:f
1579 * ab cd ef
1580 * primary addr reg 2 reg 1 reg 0
1581 * alt addr 1 reg 5 reg 4 reg 3
1582 * alt addr x reg 5*x reg 4*x reg 3*x
1583 * ctrl addr reg 44 reg 43 reg 42
1584 */
1585#define REG_MAC_ADDR0 0x6080 /* MAC address 0 reg */
1586#define REG_MAC_ADDRN(x) (REG_MAC_ADDR0 + (x)*4)
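/* Hypothetical sketch (not part of the original header): load the primary
 * MAC address a:b:c:d:e:f into address registers 0-2 following the map
 * above (reg 0 holds e:f, reg 1 holds c:d, reg 2 holds a:b). "regs" is an
 * assumed register base; the helper name is made up for illustration.
 */
static inline void cas_set_primary_mac(void __iomem *regs, const u8 addr[6])
{
	writel((addr[4] << 8) | addr[5], regs + REG_MAC_ADDRN(0));
	writel((addr[2] << 8) | addr[3], regs + REG_MAC_ADDRN(1));
	writel((addr[0] << 8) | addr[1], regs + REG_MAC_ADDRN(2));
}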
1587#define REG_MAC_ADDR_FILTER0 0x614C /* address filter 0 reg
1588 [47:32] */
1589#define REG_MAC_ADDR_FILTER1 0x6150 /* address filter 1 reg
1590 [31:16] */
1591#define REG_MAC_ADDR_FILTER2 0x6154 /* address filter 2 reg
1592 [15:0] */
1593#define REG_MAC_ADDR_FILTER2_1_MASK 0x6158 /* address filter 2 and 1
1594 mask reg. 8-bit reg
1595 contains nibble mask for
1596 reg 2 and 1. */
1597#define REG_MAC_ADDR_FILTER0_MASK 0x615C /* address filter 0 mask
1598 reg */
1599
1600/* hash table registers: 0 - 15, 0x6160 - 0x619C, 4 8-bit bytes
1601 * 16-bit registers contain bits of the hash table.
1602 * reg x -> [16*(15 - x) + 15 : 16*(15 - x)].
1603 * e.g., 15 -> [15:0], 0 -> [255:240]
1604 */
1605#define REG_MAC_HASH_TABLE0 0x6160 /* hash table 0 reg */
1606#define REG_MAC_HASH_TABLEN(x) (REG_MAC_HASH_TABLE0 + (x)*4)
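/* Hypothetical sketch (not part of the original header): set one bit of the
 * 256-bit multicast hash table using the mapping above (reg x holds bits
 * [16*(15-x)+15 : 16*(15-x)]). "hash_table" is an assumed 16-entry shadow
 * copy that would later be written out via REG_MAC_HASH_TABLEN(); names are
 * made up for illustration.
 */
static inline void cas_hash_set_bit(u16 hash_table[16], unsigned int bit)
{
	unsigned int reg = 15 - (bit / 16);	/* which 16-bit register */

	hash_table[reg] |= 1 << (bit % 16);	/* which bit within that register */
}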
1607
1608/* statistics registers. these registers generate an interrupt on
1609 * overflow. recommended initialization: 0x0000. most are 16-bits except
1610 * for PEAK_ATTEMPTS register which is 8 bits.
1611 */
1612#define REG_MAC_COLL_NORMAL 0x61A0 /* normal collision
1613 counter. */
1614#define REG_MAC_COLL_FIRST 0x61A4 /* first attempt
1615 successful collision
1616 counter */
1617#define REG_MAC_COLL_EXCESS 0x61A8 /* excessive collision
1618 counter */
1619#define REG_MAC_COLL_LATE 0x61AC /* late collision counter */
1620#define REG_MAC_TIMER_DEFER 0x61B0 /* defer timer. time base
1621 is the media byte
1622 clock/256 */
1623#define REG_MAC_ATTEMPTS_PEAK 0x61B4 /* peak attempts reg */
1624#define REG_MAC_RECV_FRAME 0x61B8 /* receive frame counter */
1625#define REG_MAC_LEN_ERR 0x61BC /* length error counter */
1626#define REG_MAC_ALIGN_ERR 0x61C0 /* alignment error counter */
1627#define REG_MAC_FCS_ERR 0x61C4 /* FCS error counter */
1628#define REG_MAC_RX_CODE_ERR 0x61C8 /* RX code violation
1629 error counter */
1630
1631/* misc registers */
1632#define REG_MAC_RANDOM_SEED 0x61CC /* random number seed reg.
1633 10-bit register used as a
1634 seed for the random number
1635 generator for the CSMA/CD
1636 backoff algorithm. only
1637 programmed after power-on
1638 reset and should be a
1639 random value which has a
1640 high likelihood of being
1641 unique for each MAC
1642 attached to a network
1643 segment (e.g., 10 LSB of
1644 MAC address) */
1645
1646/* ASUN: there's a PAUSE_TIMER (ro) described, but it's not in the address
1647 * map
1648 */
1649
1650/* 27-bit register has the current state for key state machines in the MAC */
1651#define REG_MAC_STATE_MACHINE 0x61D0 /* (ro) state machine reg */
1652#define MAC_SM_RLM_MASK 0x07800000
1653#define MAC_SM_RLM_SHIFT 23
1654#define MAC_SM_RX_FC_MASK 0x00700000
1655#define MAC_SM_RX_FC_SHIFT 20
1656#define MAC_SM_TLM_MASK 0x000F0000
1657#define MAC_SM_TLM_SHIFT 16
1658#define MAC_SM_ENCAP_SM_MASK 0x0000F000
1659#define MAC_SM_ENCAP_SM_SHIFT 12
1660#define MAC_SM_TX_REQ_MASK 0x00000C00
1661#define MAC_SM_TX_REQ_SHIFT 10
1662#define MAC_SM_TX_FC_MASK 0x000003C0
1663#define MAC_SM_TX_FC_SHIFT 6
1664#define MAC_SM_FIFO_WRITE_SEL_MASK 0x00000038
1665#define MAC_SM_FIFO_WRITE_SEL_SHIFT 3
1666#define MAC_SM_TX_FIFO_EMPTY_MASK 0x00000007
1667#define MAC_SM_TX_FIFO_EMPTY_SHIFT 0
1668
1669/** MIF registers. the MIF can be programmed in either bit-bang or
1670 * frame mode.
1671 **/
1672#define REG_MIF_BIT_BANG_CLOCK 0x6200 /* MIF bit-bang clock.
1673 1 -> 0 will generate a
1674 rising edge. 0 -> 1 will
1675 generate a falling edge. */
1676#define REG_MIF_BIT_BANG_DATA 0x6204 /* MIF bit-bang data. 1-bit
1677 register generates data */
1678#define REG_MIF_BIT_BANG_OUTPUT_EN 0x6208 /* MIF bit-bang output
1679 enable. enable when
1680 xmitting data from MIF to
1681 transceiver. */
1682
1683/* 32-bit register serves as an instruction register when the MIF is
1684 * programmed in frame mode. load this register w/ a valid instruction
1685 * (as per IEEE 802.3u MII spec). poll this register to check for instruction
1686 * execution completion. during a read operation, this register will also
1687 * contain the 16-bit data returned by the transceiver. unless specified
1688 * otherwise, fields are considered "don't care" when polling for
1689 * completion.
1690 */
1691#define REG_MIF_FRAME 0x620C /* MIF frame/output reg */
1692#define MIF_FRAME_START_MASK 0xC0000000 /* start of frame.
1693 load w/ 01 when
1694 issuing an instr */
1695#define MIF_FRAME_ST 0x40000000 /* STart of frame */
1696#define MIF_FRAME_OPCODE_MASK 0x30000000 /* opcode. 01 for a
1697 write. 10 for a
1698 read */
1699#define MIF_FRAME_OP_READ 0x20000000 /* read OPcode */
1700#define MIF_FRAME_OP_WRITE 0x10000000 /* write OPcode */
1701#define MIF_FRAME_PHY_ADDR_MASK 0x0F800000 /* phy address. when
1702 issuing an instr,
1703 this field should be
1704 loaded w/ the XCVR
1705 addr */
1706#define MIF_FRAME_PHY_ADDR_SHIFT 23
1707#define MIF_FRAME_REG_ADDR_MASK 0x007C0000 /* register address.
1708 when issuing an instr,
1709 addr of register
1710 to be read/written */
1711#define MIF_FRAME_REG_ADDR_SHIFT 18
1712#define MIF_FRAME_TURN_AROUND_MSB 0x00020000 /* turn around, MSB.
1713 when issuing an instr,
1714 set this bit to 1 */
1715#define MIF_FRAME_TURN_AROUND_LSB 0x00010000 /* turn around, LSB.
1716 when issuing an instr,
1717 set this bit to 0.
1718 when polling for
1719 completion, 1 means
1720 that instr execution
1721 has been completed */
1722#define MIF_FRAME_DATA_MASK 0x0000FFFF /* instruction payload
1723 load with 16-bit data
1724 to be written in
1725 transceiver reg for a
1726 write. doesn't matter
1727 in a read. when
1728 polling for
1729 completion, field is
1730 "don't care" for write
1731 and 16-bit data
1732 returned by the
1733 transceiver for a
1734 read (if valid bit
1735 is set) */
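/* Hypothetical sketch (not part of the original header): a frame-mode MII
 * read using the fields above -- load ST=01, the read opcode, phy and
 * register addresses and the turn-around MSB, then poll the turn-around LSB
 * until the transceiver data is valid. Assumes <linux/io.h> and
 * <linux/delay.h>; "regs" is an assumed register base and the helper name
 * and retry limit are made up for illustration.
 */
static inline int cas_mif_read(void __iomem *regs, int phy, int reg)
{
	int limit = 1000;
	u32 cmd = MIF_FRAME_ST | MIF_FRAME_OP_READ |
		  (phy << MIF_FRAME_PHY_ADDR_SHIFT) |
		  (reg << MIF_FRAME_REG_ADDR_SHIFT) |
		  MIF_FRAME_TURN_AROUND_MSB;

	writel(cmd, regs + REG_MIF_FRAME);
	while (limit--) {
		udelay(10);
		cmd = readl(regs + REG_MIF_FRAME);
		if (cmd & MIF_FRAME_TURN_AROUND_LSB)
			return cmd & MIF_FRAME_DATA_MASK;	/* 16-bit result */
	}
	return -1;	/* transceiver never completed the frame */
}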
1736#define REG_MIF_CFG 0x6210 /* MIF config reg */
1737#define MIF_CFG_PHY_SELECT 0x0001 /* 1 -> select MDIO_1
1738 0 -> select MDIO_0 */
1739#define MIF_CFG_POLL_EN 0x0002 /* enable polling
1740 mechanism. if set,
1741 BB_MODE should be 0 */
1742#define MIF_CFG_BB_MODE 0x0004 /* 1 -> bit-bang mode
1743 0 -> frame mode */
1744#define MIF_CFG_POLL_REG_MASK 0x00F8 /* register address to be
1745 used by polling mode.
1746 only meaningful if POLL_EN
1747 is set to 1 */
1748#define MIF_CFG_POLL_REG_SHIFT 3
1749#define MIF_CFG_MDIO_0 0x0100 /* (ro) dual purpose.
1750 when MDIO_0 is idle,
1751 1 -> transceiver is
1752 connected to MDIO_0.
1753 when MIF is communicating
1754 w/ MDIO_0 in bit-bang
1755 mode, this bit indicates
1756 the incoming bit stream
1757 during a read op */
1758#define MIF_CFG_MDIO_1 0x0200 /* (ro) dual purpose.
1759 when MDIO_1 is idle,
1760 1 -> transceiver is
1761 connected to MDIO_1.
1762 when MIF is communicating
1763 w/ MDIO_1 in bit-bang
1764 mode, this bit indicates
1765 the incoming bit stream
1766 during a read op */
1767#define MIF_CFG_POLL_PHY_MASK 0x7C00 /* transceiver address to
1768 be polled */
1769#define MIF_CFG_POLL_PHY_SHIFT 10
1770
1771/* 16-bit register used to determine which bits in the POLL_STATUS portion of
1772 * the MIF_STATUS register will cause an interrupt. if a mask bit is 0,
1773 * corresponding bit of the POLL_STATUS will generate a MIF interrupt when
1774 * set. DEFAULT: 0xFFFF
1775 */
1776#define REG_MIF_MASK 0x6214 /* MIF mask reg */
1777
1778/* 32-bit register used when in poll mode. auto-cleared after being read */
1779#define REG_MIF_STATUS 0x6218 /* MIF status reg */
1780#define MIF_STATUS_POLL_DATA_MASK 0xFFFF0000 /* poll data contains
1781 the "latest image"
1782 update of the XCVR
1783 reg being read */
1784#define MIF_STATUS_POLL_DATA_SHIFT 16
1785#define MIF_STATUS_POLL_STATUS_MASK 0x0000FFFF /* poll status indicates
1786 which bits in the
1787 POLL_DATA field have
1788 changed since the
1789 MIF_STATUS reg was
1790 last read */
1791#define MIF_STATUS_POLL_STATUS_SHIFT 0
1792
1793/* 7-bit register has current state for all state machines in the MIF */
1794#define REG_MIF_STATE_MACHINE 0x621C /* MIF state machine reg */
1795#define MIF_SM_CONTROL_MASK 0x07 /* control state machine
1796 state */
1797#define MIF_SM_EXECUTION_MASK 0x60 /* execution state machine
1798 state */
1799
1800/** PCS/Serialink. the following registers are equivalent to the standard
1801 * MII management registers except that they're directly mapped in
1802 * Cassini's register space.
1803 **/
1804
1805/* the auto-negotiation enable bit should be programmed the same at
1806 * the link partner as in the local device to enable auto-negotiation to
1807 * complete. when that bit is reprogrammed, auto-neg/manual config is
1808 * restarted automatically.
1809 * DEFAULT: 0x1040
1810 */
1811#define REG_PCS_MII_CTRL 0x9000 /* PCS MII control reg */
1812#define PCS_MII_CTRL_1000_SEL 0x0040 /* reads 1. ignored on
1813 writes */
1814#define PCS_MII_CTRL_COLLISION_TEST 0x0080 /* COL signal at the PCS
1815 to MAC interface is
1816 activated regardless
1817 of activity */
1818#define PCS_MII_CTRL_DUPLEX 0x0100 /* forced 0x0. PCS
1819 behaviour same for
1820 half and full dplx */
1821#define PCS_MII_RESTART_AUTONEG 0x0200 /* self clearing.
1822 restart auto-
1823 negotiation */
1824#define PCS_MII_ISOLATE 0x0400 /* read as 0. ignored
1825 on writes */
1826#define PCS_MII_POWER_DOWN 0x0800 /* read as 0. ignored
1827 on writes */
1828#define PCS_MII_AUTONEG_EN 0x1000 /* default 1. PCS goes
1829 through automatic
1830 link config before it
1831 can be used. when 0,
1832 link can be used
1833 w/out any link config
1834 phase */
1835#define PCS_MII_10_100_SEL 0x2000 /* read as 0. ignored on
1836 writes */
1837#define PCS_MII_RESET 0x8000 /* reset PCS. self-clears
1838 when done */
1839
1840/* DEFAULT: 0x0108 */
1841#define REG_PCS_MII_STATUS 0x9004 /* PCS MII status reg */
1842#define PCS_MII_STATUS_EXTEND_CAP 0x0001 /* reads 0 */
1843#define PCS_MII_STATUS_JABBER_DETECT 0x0002 /* reads 0 */
1844#define PCS_MII_STATUS_LINK_STATUS 0x0004 /* 1 -> link up.
1845 0 -> link down. 0 is
1846 latched so that 0 is
1847 kept until read. read
1848 2x to determine if the
1849 link has gone up again */
1850#define PCS_MII_STATUS_AUTONEG_ABLE 0x0008 /* reads 1 (able to perform
1851 auto-neg) */
1852#define PCS_MII_STATUS_REMOTE_FAULT 0x0010 /* 1 -> remote fault detected
1853 from received link code
1854 word. only valid after
1855 auto-neg completed */
1856#define PCS_MII_STATUS_AUTONEG_COMP 0x0020 /* 1 -> auto-negotiation
1857 completed
1858 0 -> auto-negotiation not
1859 completed */
1860#define PCS_MII_STATUS_EXTEND_STATUS 0x0100 /* reads as 1. used as an
1861 indication that this is
1862 a 1000 Base-X PHY. writes
1863 to it are ignored */
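/* Hypothetical sketch (not part of the original header): because
 * LINK_STATUS latches low, read the status register twice as the comment
 * above suggests -- the second read reflects the current link state.
 * "regs" is an assumed register base; the helper name is made up for
 * illustration.
 */
static inline int cas_pcs_link_up(void __iomem *regs)
{
	u32 stat;

	(void)readl(regs + REG_PCS_MII_STATUS);	/* discard the latched 0 */
	stat = readl(regs + REG_PCS_MII_STATUS);
	return !!(stat & PCS_MII_STATUS_LINK_STATUS);
}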
1864
1865/* used during auto-negotiation.
1866 * DEFAULT: 0x00E0
1867 */
1868#define REG_PCS_MII_ADVERT 0x9008 /* PCS MII advertisement
1869 reg */
1870#define PCS_MII_ADVERT_FD 0x0020 /* advertise full duplex
1871 1000 Base-X */
1872#define PCS_MII_ADVERT_HD 0x0040 /* advertise half-duplex
1873 1000 Base-X */
1874#define PCS_MII_ADVERT_SYM_PAUSE 0x0080 /* advertise PAUSE
1875 symmetric capability */
1876#define PCS_MII_ADVERT_ASYM_PAUSE 0x0100 /* advertises PAUSE
1877 asymmetric capability */
1878#define PCS_MII_ADVERT_RF_MASK 0x3000 /* remote fault. write bit13
1879 to optionally indicate to
1880 link partner that chip is
1881 going off-line. bit12 will
1882 get set when signal
1883 detect == FAIL and will
1884 remain set until
1885 successful negotiation */
1886#define PCS_MII_ADVERT_ACK 0x4000 /* (ro) */
1887#define PCS_MII_ADVERT_NEXT_PAGE 0x8000 /* (ro) forced 0x0 */
1888
1889/* contents updated as a result of autonegotiation. layout and definitions
1890 * identical to PCS_MII_ADVERT
1891 */
1892#define REG_PCS_MII_LPA 0x900C /* PCS MII link partner
1893 ability reg */
1894#define PCS_MII_LPA_FD PCS_MII_ADVERT_FD
1895#define PCS_MII_LPA_HD PCS_MII_ADVERT_HD
1896#define PCS_MII_LPA_SYM_PAUSE PCS_MII_ADVERT_SYM_PAUSE
1897#define PCS_MII_LPA_ASYM_PAUSE PCS_MII_ADVERT_ASYM_PAUSE
1898#define PCS_MII_LPA_RF_MASK PCS_MII_ADVERT_RF_MASK
1899#define PCS_MII_LPA_ACK PCS_MII_ADVERT_ACK
1900#define PCS_MII_LPA_NEXT_PAGE PCS_MII_ADVERT_NEXT_PAGE
1901
1902/* DEFAULT: 0x0 */
1903#define REG_PCS_CFG 0x9010 /* PCS config reg */
1904#define PCS_CFG_EN 0x01 /* enable PCS. must be
1905 0 when modifying
1906 PCS_MII_ADVERT */
1907#define PCS_CFG_SD_OVERRIDE 0x02 /* sets signal detect to
1908 OK. bit is
1909 non-resettable */
1910#define PCS_CFG_SD_ACTIVE_LOW 0x04 /* changes interpretation
1911 of optical signal to make
1912 signal detect okay when
1913 signal is low */
1914#define PCS_CFG_JITTER_STUDY_MASK 0x18 /* used to make jitter
1915 measurements. a single
1916 code group is xmitted
1917 regularly.
1918 0x0 = normal operation
1919 0x1 = high freq test
1920 pattern, D21.5
1921 0x2 = low freq test
1922 pattern, K28.7
1923 0x3 = reserved */
1924#define PCS_CFG_10MS_TIMER_OVERRIDE 0x20 /* shortens 10-20ms auto-
1925 negotiation timer to
1926 a few cycles for test
1927 purposes */
1928
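/* Illustrative sketch only: per the PCS_CFG_EN note above, the PCS is
 * disabled while PCS_MII_ADVERT is rewritten, then re-enabled. "regs",
 * readl() and writel() are assumptions for the example.
 */
static inline void example_pcs_write_advert(void __iomem *regs, u32 advert)
{
	u32 cfg = readl(regs + REG_PCS_CFG);

	writel(cfg & ~PCS_CFG_EN, regs + REG_PCS_CFG);	/* PCS off */
	writel(advert, regs + REG_PCS_MII_ADVERT);
	writel(cfg | PCS_CFG_EN, regs + REG_PCS_CFG);	/* PCS back on */
}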
1929/* used for diagnostic purposes. bits 20-22 autoclear on read */
1930#define REG_PCS_STATE_MACHINE 0x9014 /* (ro) PCS state machine
1931 and diagnostic reg */
1932#define PCS_SM_TX_STATE_MASK 0x0000000F /* 0 and 1 indicate
1933 xmission of idle.
1934 otherwise, xmission of
1935 a packet */
1936#define PCS_SM_RX_STATE_MASK 0x000000F0 /* 0 indicates reception
1937 of idle. otherwise,
1938 reception of packet */
1939#define PCS_SM_WORD_SYNC_STATE_MASK 0x00000700 /* 0 indicates loss of
1940 sync */
1941#define PCS_SM_SEQ_DETECT_STATE_MASK 0x00001800 /* cycling through 0-3
1942 indicates reception of
1943 Config codes. cycling
1944 through 0-1 indicates
1945 reception of idles */
1946#define PCS_SM_LINK_STATE_MASK 0x0001E000
1947#define SM_LINK_STATE_UP 0x00016000 /* link state is up */
1948
1949#define PCS_SM_LOSS_LINK_C 0x00100000 /* loss of link due to
1950 receipt of Config
1951 codes */
1952#define PCS_SM_LOSS_LINK_SYNC 0x00200000 /* loss of link due to
1953 loss of sync */
1954#define PCS_SM_LOSS_SIGNAL_DETECT 0x00400000 /* signal detect goes
1955 from OK to FAIL. bit29
1956 will also be set if
1957 this is set */
1958#define PCS_SM_NO_LINK_BREAKLINK 0x01000000 /* link not up due to
1959 receipt of breaklink
1960 C codes from partner.
1961 C codes w/ 0 content
1962 received triggering
1963 start/restart of
1964 autonegotiation.
1965 should be sent for
1966 no longer than 20ms */
1967#define PCS_SM_NO_LINK_SERDES 0x02000000 /* serdes being
1968 initialized. see serdes
1969 state reg */
1970#define PCS_SM_NO_LINK_C 0x04000000 /* C codes not stable or
1971 not received */
1972#define PCS_SM_NO_LINK_SYNC 0x08000000 /* word sync not
1973 achieved */
1974#define PCS_SM_NO_LINK_WAIT_C 0x10000000 /* waiting for C codes
1975 w/ ack bit set */
1976#define PCS_SM_NO_LINK_NO_IDLE 0x20000000 /* link partner continues
1977 to send C codes
1978 instead of idle
1979 symbols or pkt data */
1980
1981/* this register indicates interrupt changes in specific PCS MII status bits.
1982 * PCS_INT may be masked at the ISR level. only a single bit is implemented
1983 * for link status change.
1984 */
1985#define REG_PCS_INTR_STATUS 0x9018 /* PCS interrupt status */
1986#define PCS_INTR_STATUS_LINK_CHANGE 0x04 /* link status has changed
1987 since last read */
1988
1989/* control which network interface is used. no more than one bit should
1990 * be set.
1991 * DEFAULT: none
1992 */
1993#define REG_PCS_DATAPATH_MODE 0x9050 /* datapath mode reg */
1994#define PCS_DATAPATH_MODE_MII 0x00 /* PCS is not used and
1995 MII/GMII is selected.
1996 selection between MII and
1997 GMII is controlled by
1998 XIF_CFG */
1999#define PCS_DATAPATH_MODE_SERDES 0x02 /* PCS is used via the
2000 10-bit interface */
2001
2002/* input to serdes chip or serialink block */
2003#define REG_PCS_SERDES_CTRL 0x9054 /* serdes control reg */
2004#define PCS_SERDES_CTRL_LOOPBACK 0x01 /* enable loopback on
2005 serdes interface */
2006#define PCS_SERDES_CTRL_SYNCD_EN 0x02 /* enable sync carrier
2007 detection. should be
2008 0x0 for normal
2009 operation */
2010#define PCS_SERDES_CTRL_LOCKREF 0x04 /* frequency-lock RBC[0:1]
2011 to REFCLK when set.
2012 when clear, receiver
2013 clock locks to incoming
2014 serial data */
2015
2016/* multiplex test outputs into the PROM address (PA_3 through PA_0) pins.
2017 * should be 0x0 for normal operations.
2018 * 0b000 normal operation, PROM address[3:0] selected
2019 * 0b001 rxdma req, rxdma ack, rxdma ready, rxdma read
2020 * 0b010 rxmac req, rx ack, rx tag, rx clk shared
2021 * 0b011 txmac req, tx ack, tx tag, tx retry req
2022 * 0b100 tx tp3, tx tp2, tx tp1, tx tp0
2023 * 0b101 R period RX, R period TX, R period HP, R period BIM
2024 * DEFAULT: 0x0
2025 */
2026#define REG_PCS_SHARED_OUTPUT_SEL 0x9058 /* shared output select */
2027#define PCS_SOS_PROM_ADDR_MASK 0x0007
2028
2029/* used for diagnostics. this register indicates progress of the SERDES
2030 * boot up.
2031 * 0b00 undergoing reset
2032 * 0b01 waiting 500us while lockrefn is asserted
2033 * 0b10 waiting for comma detect
2034 * 0b11 receive data is synchronized
2035 * DEFAULT: 0x0
2036 */
2037#define REG_PCS_SERDES_STATE 0x905C /* (ro) serdes state */
2038#define PCS_SERDES_STATE_MASK 0x03
2039
2040/* used for diagnostics. indicates number of packets transmitted or received.
2041 * counters roll over w/out generating an interrupt.
2042 * DEFAULT: 0x0
2043 */
2044#define REG_PCS_PACKET_COUNT 0x9060 /* (ro) PCS packet counter */
2045#define PCS_PACKET_COUNT_TX 0x000007FF /* pkts xmitted by PCS */
2046#define PCS_PACKET_COUNT_RX 0x07FF0000 /* pkts recvd by PCS
2047 whether they
2048 encountered an error
2049 or not */
2050
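/* Illustrative sketch only: the TX and RX counters share one register; mask
 * out each 11-bit field (the RX field sits in bits 16-26). "regs" and
 * readl() are assumptions for the example.
 */
static inline void example_pcs_packet_counts(void __iomem *regs,
					     u32 *tx, u32 *rx)
{
	u32 cnt = readl(regs + REG_PCS_PACKET_COUNT);

	*tx = cnt & PCS_PACKET_COUNT_TX;
	*rx = (cnt & PCS_PACKET_COUNT_RX) >> 16;
}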
2051/** LocalBus Devices. the following registers provide run-time access to the
2052 * Cassini's PROM
2053 **/
2054#define REG_EXPANSION_ROM_RUN_START 0x100000 /* expansion rom run time
2055 access */
2056#define REG_EXPANSION_ROM_RUN_END 0x17FFFF
2057
2058#define REG_SECOND_LOCALBUS_START 0x180000 /* secondary local bus
2059 device */
2060#define REG_SECOND_LOCALBUS_END 0x1FFFFF
2061
2062/* entropy device */
2063#define REG_ENTROPY_START REG_SECOND_LOCALBUS_START
2064#define REG_ENTROPY_DATA (REG_ENTROPY_START + 0x00)
2065#define REG_ENTROPY_STATUS (REG_ENTROPY_START + 0x04)
2066#define ENTROPY_STATUS_DRDY 0x01
2067#define ENTROPY_STATUS_BUSY 0x02
2068#define ENTROPY_STATUS_CIPHER 0x04
2069#define ENTROPY_STATUS_BYPASS_MASK 0x18
2070#define REG_ENTROPY_MODE (REG_ENTROPY_START + 0x05)
2071#define ENTROPY_MODE_KEY_MASK 0x07
2072#define ENTROPY_MODE_ENCRYPT 0x40
2073#define REG_ENTROPY_RAND_REG (REG_ENTROPY_START + 0x06)
2074#define REG_ENTROPY_RESET (REG_ENTROPY_START + 0x07)
2075#define ENTROPY_RESET_DES_IO 0x01
2076#define ENTROPY_RESET_STC_MODE 0x02
2077#define ENTROPY_RESET_KEY_CACHE 0x04
2078#define ENTROPY_RESET_IV 0x08
2079#define REG_ENTROPY_IV (REG_ENTROPY_START + 0x08)
2080#define REG_ENTROPY_KEY0 (REG_ENTROPY_START + 0x10)
2081#define REG_ENTROPY_KEYN(x) (REG_ENTROPY_KEY0 + 4*(x))
2082
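/* Illustrative sketch only: polling the entropy device through the local-bus
 * registers above and reading one byte when data is ready. "regs" and the
 * readb() accessor (byte-wide access is an assumption) are for the example.
 */
static inline int example_entropy_read_byte(void __iomem *regs, u8 *out)
{
	if (!(readb(regs + REG_ENTROPY_STATUS) & ENTROPY_STATUS_DRDY))
		return -1;			/* no random byte ready yet */

	*out = readb(regs + REG_ENTROPY_DATA);
	return 0;
}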
2083/* phys of interest w/ their special mii registers */
2084#define PHY_LUCENT_B0 0x00437421
2085#define LUCENT_MII_REG 0x1F
2086
2087#define PHY_NS_DP83065 0x20005c78
2088#define DP83065_MII_MEM 0x16
2089#define DP83065_MII_REGD 0x1D
2090#define DP83065_MII_REGE 0x1E
2091
2092#define PHY_BROADCOM_5411 0x00206071
2093#define PHY_BROADCOM_B0 0x00206050
2094#define BROADCOM_MII_REG4 0x14
2095#define BROADCOM_MII_REG5 0x15
2096#define BROADCOM_MII_REG7 0x17
2097#define BROADCOM_MII_REG8 0x18
2098
2099#define CAS_MII_ANNPTR 0x07
2100#define CAS_MII_ANNPRR 0x08
2101#define CAS_MII_1000_CTRL 0x09
2102#define CAS_MII_1000_STATUS 0x0A
2103#define CAS_MII_1000_EXTEND 0x0F
2104
2105#define CAS_BMSR_1000_EXTEND 0x0100 /* supports 1000Base-T extended status */
2106/*
2107 * if autoneg is disabled, here's the table:
2108 * BMCR_SPEED100 = 100Mbps
2109 * BMCR_SPEED1000 = 1000Mbps
2110 * ~(BMCR_SPEED100 | BMCR_SPEED1000) = 10Mbps
2111 */
2112#define CAS_BMCR_SPEED1000 0x0040 /* Select 1000Mbps */
2113
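/* Illustrative helper only: mapping the forced-speed bits from the table
 * above to Mbps. BMCR_SPEED100 comes from <linux/mii.h>; the helper itself
 * is just an example, not part of the header.
 */
static inline int example_bmcr_forced_speed(u16 bmcr)
{
	if (bmcr & CAS_BMCR_SPEED1000)
		return 1000;
	if (bmcr & BMCR_SPEED100)
		return 100;
	return 10;
}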
2114#define CAS_ADVERTISE_1000HALF 0x0100
2115#define CAS_ADVERTISE_1000FULL 0x0200
2116#define CAS_ADVERTISE_PAUSE 0x0400
2117#define CAS_ADVERTISE_ASYM_PAUSE 0x0800
2118
2119/* regular lpa register */
2120#define CAS_LPA_PAUSE CAS_ADVERTISE_PAUSE
2121#define CAS_LPA_ASYM_PAUSE CAS_ADVERTISE_ASYM_PAUSE
2122
2123/* 1000_STATUS register */
2124#define CAS_LPA_1000HALF 0x0400
2125#define CAS_LPA_1000FULL 0x0800
2126
2127#define CAS_EXTEND_1000XFULL 0x8000
2128#define CAS_EXTEND_1000XHALF 0x4000
2129#define CAS_EXTEND_1000TFULL 0x2000
2130#define CAS_EXTEND_1000THALF 0x1000
2131
2132/* cassini header parser firmware */
2133typedef struct cas_hp_inst {
2134 const char *note;
2135
2136 u16 mask, val;
2137
2138 u8 op;
2139 u8 soff, snext; /* if match succeeds, new offset and match */
2140 u8 foff, fnext; /* if match fails, new offset and match */
2141 /* output info */
2142 u8 outop; /* output opcode */
2143
2144 u16 outarg; /* output argument */
2145 u8 outenab; /* output enable: 0 = not, 1 = if match
2146 2 = if !match, 3 = always */
2147 u8 outshift; /* barrel shift right, 4 bits */
2148 u16 outmask;
2149} cas_hp_inst_t;
2150
2151/* comparison */
2152#define OP_EQ 0 /* packet == value */
2153#define OP_LT 1 /* packet < value */
2154#define OP_GT 2 /* packet > value */
2155#define OP_NP 3 /* new packet */
2156
2157/* output opcodes */
2158#define CL_REG 0
2159#define LD_FID 1
2160#define LD_SEQ 2
2161#define LD_CTL 3
2162#define LD_SAP 4
2163#define LD_R1 5
2164#define LD_L3 6
2165#define LD_SUM 7
2166#define LD_HDR 8
2167#define IM_FID 9
2168#define IM_SEQ 10
2169#define IM_SAP 11
2170#define IM_R1 12
2171#define IM_CTL 13
2172#define LD_LEN 14
2173#define ST_FLG 15
2174
2175/* match step #s for IP4TCP4 */
2176#define S1_PCKT 0
2177#define S1_VLAN 1
2178#define S1_CFI 2
2179#define S1_8023 3
2180#define S1_LLC 4
2181#define S1_LLCc 5
2182#define S1_IPV4 6
2183#define S1_IPV4c 7
2184#define S1_IPV4F 8
2185#define S1_TCP44 9
2186#define S1_IPV6 10
2187#define S1_IPV6L 11
2188#define S1_IPV6c 12
2189#define S1_TCP64 13
2190#define S1_TCPSQ 14
2191#define S1_TCPFG 15
2192#define S1_TCPHL 16
2193#define S1_TCPHc 17
2194#define S1_CLNP 18
2195#define S1_CLNP2 19
2196#define S1_DROP 20
2197#define S2_HTTP 21
2198#define S1_ESP4 22
2199#define S1_AH4 23
2200#define S1_ESP6 24
2201#define S1_AH6 25
2202
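/* Illustrative example only: how one table entry below maps onto the
 * cas_hp_inst_t fields, using the "VLAN?" step of the IP46TCP4 preamble.
 * The named-initializer form is just for readability here; it is not part
 * of the tables themselves.
 */
static const cas_hp_inst_t example_hp_vlan_step = {
	.note     = "VLAN?",
	.mask     = 0xffff, .val = 0x8100,	/* match the 802.1Q ethertype */
	.op       = OP_EQ,
	.soff     = 1, .snext = S1_CFI,		/* match: advance offset, check CFI */
	.foff     = 0, .fnext = S1_8023,	/* no match: try 802.3 length test */
	.outop    = IM_CTL,			/* output opcode */
	.outarg   = 0x00a,
	.outenab  = 3,				/* 3 = always emit the output */
	.outshift = 0x0,
	.outmask  = 0xffff,
};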
2203#define CAS_PROG_IP46TCP4_PREAMBLE \
2204{ "packet arrival?", 0xffff, 0x0000, OP_NP, 6, S1_VLAN, 0, S1_PCKT, \
2205 CL_REG, 0x3ff, 1, 0x0, 0x0000}, \
2206{ "VLAN?", 0xffff, 0x8100, OP_EQ, 1, S1_CFI, 0, S1_8023, \
2207 IM_CTL, 0x00a, 3, 0x0, 0xffff}, \
2208{ "CFI?", 0x1000, 0x1000, OP_EQ, 0, S1_DROP, 1, S1_8023, \
2209 CL_REG, 0x000, 0, 0x0, 0x0000}, \
2210{ "8023?", 0xffff, 0x0600, OP_LT, 1, S1_LLC, 0, S1_IPV4, \
2211 CL_REG, 0x000, 0, 0x0, 0x0000}, \
2212{ "LLC?", 0xffff, 0xaaaa, OP_EQ, 1, S1_LLCc, 0, S1_CLNP, \
2213 CL_REG, 0x000, 0, 0x0, 0x0000}, \
2214{ "LLCc?", 0xff00, 0x0300, OP_EQ, 2, S1_IPV4, 0, S1_CLNP, \
2215 CL_REG, 0x000, 0, 0x0, 0x0000}, \
2216{ "IPV4?", 0xffff, 0x0800, OP_EQ, 1, S1_IPV4c, 0, S1_IPV6, \
2217 LD_SAP, 0x100, 3, 0x0, 0xffff}, \
2218{ "IPV4 cont?", 0xff00, 0x4500, OP_EQ, 3, S1_IPV4F, 0, S1_CLNP, \
2219 LD_SUM, 0x00a, 1, 0x0, 0x0000}, \
2220{ "IPV4 frag?", 0x3fff, 0x0000, OP_EQ, 1, S1_TCP44, 0, S1_CLNP, \
2221 LD_LEN, 0x03e, 1, 0x0, 0xffff}, \
2222{ "TCP44?", 0x00ff, 0x0006, OP_EQ, 7, S1_TCPSQ, 0, S1_CLNP, \
2223 LD_FID, 0x182, 1, 0x0, 0xffff}, /* FID IP4&TCP src+dst */ \
2224{ "IPV6?", 0xffff, 0x86dd, OP_EQ, 1, S1_IPV6L, 0, S1_CLNP, \
2225 LD_SUM, 0x015, 1, 0x0, 0x0000}, \
2226{ "IPV6 len", 0xf000, 0x6000, OP_EQ, 0, S1_IPV6c, 0, S1_CLNP, \
2227 IM_R1, 0x128, 1, 0x0, 0xffff}, \
2228{ "IPV6 cont?", 0x0000, 0x0000, OP_EQ, 3, S1_TCP64, 0, S1_CLNP, \
2229 LD_FID, 0x484, 1, 0x0, 0xffff}, /* FID IP6&TCP src+dst */ \
2230{ "TCP64?", 0xff00, 0x0600, OP_EQ, 18, S1_TCPSQ, 0, S1_CLNP, \
2231 LD_LEN, 0x03f, 1, 0x0, 0xffff}
2232
2233#ifdef USE_HP_IP46TCP4
2234static cas_hp_inst_t cas_prog_ip46tcp4tab[] = {
2235 CAS_PROG_IP46TCP4_PREAMBLE,
2236 { "TCP seq", /* DADDR should point to dest port */
2237 0x0000, 0x0000, OP_EQ, 0, S1_TCPFG, 4, S1_TCPFG, LD_SEQ,
2238 0x081, 3, 0x0, 0xffff}, /* Load TCP seq # */
2239 { "TCP control flags", 0x0000, 0x0000, OP_EQ, 0, S1_TCPHL, 0,
2240 S1_TCPHL, ST_FLG, 0x045, 3, 0x0, 0x002f}, /* Load TCP flags */
2241 { "TCP length", 0x0000, 0x0000, OP_EQ, 0, S1_TCPHc, 0,
2242 S1_TCPHc, LD_R1, 0x205, 3, 0xB, 0xf000},
2243 { "TCP length cont", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0,
2244 S1_PCKT, LD_HDR, 0x0ff, 3, 0x0, 0xffff},
2245 { "Cleanup", 0x0000, 0x0000, OP_EQ, 0, S1_CLNP2, 0, S1_CLNP2,
2246 IM_CTL, 0x001, 3, 0x0, 0x0001},
2247 { "Cleanup 2", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0, S1_PCKT,
2248 IM_CTL, 0x000, 0, 0x0, 0x0000},
2249 { "Drop packet", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0, S1_PCKT,
2250 IM_CTL, 0x080, 3, 0x0, 0xffff},
2251 { NULL },
2252};
2253#ifdef HP_IP46TCP4_DEFAULT
2254#define CAS_HP_FIRMWARE cas_prog_ip46tcp4tab
2255#endif
2256#endif
2257
2258/*
2259 * Alternate table load which excludes HTTP server traffic from reassembly.
2260 * It is substantially similar to the basic table, with one extra state
2261 * and a few extra compares. */
2262#ifdef USE_HP_IP46TCP4NOHTTP
2263static cas_hp_inst_t cas_prog_ip46tcp4nohttptab[] = {
2264 CAS_PROG_IP46TCP4_PREAMBLE,
2265 { "TCP seq", /* DADDR should point to dest port */
2266 0xFFFF, 0x0080, OP_EQ, 0, S2_HTTP, 0, S1_TCPFG, LD_SEQ,
2267 0x081, 3, 0x0, 0xffff} , /* Load TCP seq # */
2268 { "TCP control flags", 0xFFFF, 0x8080, OP_EQ, 0, S2_HTTP, 0,
2269 S1_TCPHL, ST_FLG, 0x145, 2, 0x0, 0x002f, }, /* Load TCP flags */
2270 { "TCP length", 0x0000, 0x0000, OP_EQ, 0, S1_TCPHc, 0, S1_TCPHc,
2271 LD_R1, 0x205, 3, 0xB, 0xf000},
2272 { "TCP length cont", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0, S1_PCKT,
2273 LD_HDR, 0x0ff, 3, 0x0, 0xffff},
2274 { "Cleanup", 0x0000, 0x0000, OP_EQ, 0, S1_CLNP2, 0, S1_CLNP2,
2275 IM_CTL, 0x001, 3, 0x0, 0x0001},
2276 { "Cleanup 2", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0, S1_PCKT,
2277 CL_REG, 0x002, 3, 0x0, 0x0000},
2278 { "Drop packet", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0, S1_PCKT,
2279 IM_CTL, 0x080, 3, 0x0, 0xffff},
2280 { "No HTTP", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0, S1_PCKT,
2281 IM_CTL, 0x044, 3, 0x0, 0xffff},
2282 { NULL },
2283};
2284#ifdef HP_IP46TCP4NOHTTP_DEFAULT
2285#define CAS_HP_FIRMWARE cas_prog_ip46tcp4nohttptab
2286#endif
2287#endif
2288
2289/* match step #s for IP4FRAG */
2290#define S3_IPV6c 11
2291#define S3_TCP64 12
2292#define S3_TCPSQ 13
2293#define S3_TCPFG 14
2294#define S3_TCPHL 15
2295#define S3_TCPHc 16
2296#define S3_FRAG 17
2297#define S3_FOFF 18
2298#define S3_CLNP 19
2299
2300#ifdef USE_HP_IP4FRAG
2301static cas_hp_inst_t cas_prog_ip4fragtab[] = {
2302 { "packet arrival?", 0xffff, 0x0000, OP_NP, 6, S1_VLAN, 0, S1_PCKT,
2303 CL_REG, 0x3ff, 1, 0x0, 0x0000},
2304 { "VLAN?", 0xffff, 0x8100, OP_EQ, 1, S1_CFI, 0, S1_8023,
2305 IM_CTL, 0x00a, 3, 0x0, 0xffff},
2306 { "CFI?", 0x1000, 0x1000, OP_EQ, 0, S3_CLNP, 1, S1_8023,
2307 CL_REG, 0x000, 0, 0x0, 0x0000},
2308 { "8023?", 0xffff, 0x0600, OP_LT, 1, S1_LLC, 0, S1_IPV4,
2309 CL_REG, 0x000, 0, 0x0, 0x0000},
2310 { "LLC?", 0xffff, 0xaaaa, OP_EQ, 1, S1_LLCc, 0, S3_CLNP,
2311 CL_REG, 0x000, 0, 0x0, 0x0000},
2312 { "LLCc?",0xff00, 0x0300, OP_EQ, 2, S1_IPV4, 0, S3_CLNP,
2313 CL_REG, 0x000, 0, 0x0, 0x0000},
2314 { "IPV4?", 0xffff, 0x0800, OP_EQ, 1, S1_IPV4c, 0, S1_IPV6,
2315 LD_SAP, 0x100, 3, 0x0, 0xffff},
2316 { "IPV4 cont?", 0xff00, 0x4500, OP_EQ, 3, S1_IPV4F, 0, S3_CLNP,
2317 LD_SUM, 0x00a, 1, 0x0, 0x0000},
2318 { "IPV4 frag?", 0x3fff, 0x0000, OP_EQ, 1, S1_TCP44, 0, S3_FRAG,
2319 LD_LEN, 0x03e, 3, 0x0, 0xffff},
2320 { "TCP44?", 0x00ff, 0x0006, OP_EQ, 7, S3_TCPSQ, 0, S3_CLNP,
2321 LD_FID, 0x182, 3, 0x0, 0xffff}, /* FID IP4&TCP src+dst */
2322 { "IPV6?", 0xffff, 0x86dd, OP_EQ, 1, S3_IPV6c, 0, S3_CLNP,
2323 LD_SUM, 0x015, 1, 0x0, 0x0000},
2324 { "IPV6 cont?", 0xf000, 0x6000, OP_EQ, 3, S3_TCP64, 0, S3_CLNP,
2325 LD_FID, 0x484, 1, 0x0, 0xffff}, /* FID IP6&TCP src+dst */
2326 { "TCP64?", 0xff00, 0x0600, OP_EQ, 18, S3_TCPSQ, 0, S3_CLNP,
2327 LD_LEN, 0x03f, 1, 0x0, 0xffff},
2328 { "TCP seq", /* DADDR should point to dest port */
2329 0x0000, 0x0000, OP_EQ, 0, S3_TCPFG, 4, S3_TCPFG, LD_SEQ,
2330 0x081, 3, 0x0, 0xffff}, /* Load TCP seq # */
2331 { "TCP control flags", 0x0000, 0x0000, OP_EQ, 0, S3_TCPHL, 0,
2332 S3_TCPHL, ST_FLG, 0x045, 3, 0x0, 0x002f}, /* Load TCP flags */
2333 { "TCP length", 0x0000, 0x0000, OP_EQ, 0, S3_TCPHc, 0, S3_TCPHc,
2334 LD_R1, 0x205, 3, 0xB, 0xf000},
2335 { "TCP length cont", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0, S1_PCKT,
2336 LD_HDR, 0x0ff, 3, 0x0, 0xffff},
2337 { "IP4 Fragment", 0x0000, 0x0000, OP_EQ, 0, S3_FOFF, 0, S3_FOFF,
2338 LD_FID, 0x103, 3, 0x0, 0xffff}, /* FID IP4 src+dst */
2339 { "IP4 frag offset", 0x0000, 0x0000, OP_EQ, 0, S3_FOFF, 0, S3_FOFF,
2340 LD_SEQ, 0x040, 1, 0xD, 0xfff8},
2341 { "Cleanup", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0, S1_PCKT,
2342 IM_CTL, 0x001, 3, 0x0, 0x0001},
2343 { NULL },
2344};
2345#ifdef HP_IP4FRAG_DEFAULT
2346#define CAS_HP_FIRMWARE cas_prog_ip4fragtab
2347#endif
2348#endif
2349
2350/*
2351 * Alternate table which does batching without reassembly
2352 */
2353#ifdef USE_HP_IP46TCP4BATCH
2354static cas_hp_inst_t cas_prog_ip46tcp4batchtab[] = {
2355 CAS_PROG_IP46TCP4_PREAMBLE,
2356 { "TCP seq", /* DADDR should point to dest port */
2357 0x0000, 0x0000, OP_EQ, 0, S1_TCPFG, 0, S1_TCPFG, LD_SEQ,
2358 0x081, 3, 0x0, 0xffff}, /* Load TCP seq # */
2359 { "TCP control flags", 0x0000, 0x0000, OP_EQ, 0, S1_TCPHL, 0,
2360 S1_TCPHL, ST_FLG, 0x000, 3, 0x0, 0x0000}, /* Load TCP flags */
2361 { "TCP length", 0x0000, 0x0000, OP_EQ, 0, S1_TCPHc, 0,
2362 S1_TCPHc, LD_R1, 0x205, 3, 0xB, 0xf000},
2363 { "TCP length cont", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0,
2364 S1_PCKT, IM_CTL, 0x040, 3, 0x0, 0xffff}, /* set batch bit */
2365 { "Cleanup", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0, S1_PCKT,
2366 IM_CTL, 0x001, 3, 0x0, 0x0001},
2367 { "Drop packet", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0,
2368 S1_PCKT, IM_CTL, 0x080, 3, 0x0, 0xffff},
2369 { NULL },
2370};
2371#ifdef HP_IP46TCP4BATCH_DEFAULT
2372#define CAS_HP_FIRMWARE cas_prog_ip46tcp4batchtab
2373#endif
2374#endif
2375
2376/* Workaround for Cassini rev2 descriptor corruption problem.
2377 * Does batching without reassembly, and sets the SAP to a known
2378 * data pattern for all packets.
2379 */
2380#ifdef USE_HP_WORKAROUND
2381static cas_hp_inst_t cas_prog_workaroundtab[] = {
2382 { "packet arrival?", 0xffff, 0x0000, OP_NP, 6, S1_VLAN, 0,
2383 S1_PCKT, CL_REG, 0x3ff, 1, 0x0, 0x0000} ,
2384 { "VLAN?", 0xffff, 0x8100, OP_EQ, 1, S1_CFI, 0, S1_8023,
2385 IM_CTL, 0x04a, 3, 0x0, 0xffff},
2386 { "CFI?", 0x1000, 0x1000, OP_EQ, 0, S1_CLNP, 1, S1_8023,
2387 CL_REG, 0x000, 0, 0x0, 0x0000},
2388 { "8023?", 0xffff, 0x0600, OP_LT, 1, S1_LLC, 0, S1_IPV4,
2389 CL_REG, 0x000, 0, 0x0, 0x0000},
2390 { "LLC?", 0xffff, 0xaaaa, OP_EQ, 1, S1_LLCc, 0, S1_CLNP,
2391 CL_REG, 0x000, 0, 0x0, 0x0000},
2392 { "LLCc?", 0xff00, 0x0300, OP_EQ, 2, S1_IPV4, 0, S1_CLNP,
2393 CL_REG, 0x000, 0, 0x0, 0x0000},
2394 { "IPV4?", 0xffff, 0x0800, OP_EQ, 1, S1_IPV4c, 0, S1_IPV6,
2395 IM_SAP, 0x6AE, 3, 0x0, 0xffff},
2396 { "IPV4 cont?", 0xff00, 0x4500, OP_EQ, 3, S1_IPV4F, 0, S1_CLNP,
2397 LD_SUM, 0x00a, 1, 0x0, 0x0000},
2398 { "IPV4 frag?", 0x3fff, 0x0000, OP_EQ, 1, S1_TCP44, 0, S1_CLNP,
2399 LD_LEN, 0x03e, 1, 0x0, 0xffff},
2400 { "TCP44?", 0x00ff, 0x0006, OP_EQ, 7, S1_TCPSQ, 0, S1_CLNP,
2401 LD_FID, 0x182, 3, 0x0, 0xffff}, /* FID IP4&TCP src+dst */
2402 { "IPV6?", 0xffff, 0x86dd, OP_EQ, 1, S1_IPV6L, 0, S1_CLNP,
2403 LD_SUM, 0x015, 1, 0x0, 0x0000},
2404 { "IPV6 len", 0xf000, 0x6000, OP_EQ, 0, S1_IPV6c, 0, S1_CLNP,
2405 IM_R1, 0x128, 1, 0x0, 0xffff},
2406 { "IPV6 cont?", 0x0000, 0x0000, OP_EQ, 3, S1_TCP64, 0, S1_CLNP,
2407 LD_FID, 0x484, 1, 0x0, 0xffff}, /* FID IP6&TCP src+dst */
2408 { "TCP64?", 0xff00, 0x0600, OP_EQ, 18, S1_TCPSQ, 0, S1_CLNP,
2409 LD_LEN, 0x03f, 1, 0x0, 0xffff},
2410 { "TCP seq", /* DADDR should point to dest port */
2411 0x0000, 0x0000, OP_EQ, 0, S1_TCPFG, 4, S1_TCPFG, LD_SEQ,
2412 0x081, 3, 0x0, 0xffff}, /* Load TCP seq # */
2413 { "TCP control flags", 0x0000, 0x0000, OP_EQ, 0, S1_TCPHL, 0,
2414 S1_TCPHL, ST_FLG, 0x045, 3, 0x0, 0x002f}, /* Load TCP flags */
2415 { "TCP length", 0x0000, 0x0000, OP_EQ, 0, S1_TCPHc, 0, S1_TCPHc,
2416 LD_R1, 0x205, 3, 0xB, 0xf000},
2417 { "TCP length cont", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0,
2418 S1_PCKT, LD_HDR, 0x0ff, 3, 0x0, 0xffff},
2419 { "Cleanup", 0x0000, 0x0000, OP_EQ, 0, S1_CLNP2, 0, S1_CLNP2,
2420 IM_SAP, 0x6AE, 3, 0x0, 0xffff} ,
2421 { "Cleanup 2", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0, S1_PCKT,
2422 IM_CTL, 0x001, 3, 0x0, 0x0001},
2423 { NULL },
2424};
2425#ifdef HP_WORKAROUND_DEFAULT
2426#define CAS_HP_FIRMWARE cas_prog_workaroundtab
2427#endif
2428#endif
2429
2430#ifdef USE_HP_ENCRYPT
2431static cas_hp_inst_t cas_prog_encryptiontab[] = {
2432 { "packet arrival?", 0xffff, 0x0000, OP_NP, 6, S1_VLAN, 0,
2433 S1_PCKT, CL_REG, 0x3ff, 1, 0x0, 0x0000},
2434 { "VLAN?", 0xffff, 0x8100, OP_EQ, 1, S1_CFI, 0, S1_8023,
2435 IM_CTL, 0x00a, 3, 0x0, 0xffff},
2436#if 0
2437//"CFI?", /* 02 FIND CFI and If FIND go to S1_DROP */
2438//0x1000, 0x1000, OP_EQ, 0, S1_DROP, 1, S1_8023, CL_REG, 0x000, 0, 0x0, 0x0000,
2440#endif
2441 { "CFI?", /* FIND CFI and If FIND go to CleanUP1 (ignore and send to host) */
2442 0x1000, 0x1000, OP_EQ, 0, S1_CLNP, 1, S1_8023,
2443 CL_REG, 0x000, 0, 0x0, 0x0000},
2444 { "8023?", 0xffff, 0x0600, OP_LT, 1, S1_LLC, 0, S1_IPV4,
2445 CL_REG, 0x000, 0, 0x0, 0x0000},
2446 { "LLC?", 0xffff, 0xaaaa, OP_EQ, 1, S1_LLCc, 0, S1_CLNP,
2447 CL_REG, 0x000, 0, 0x0, 0x0000},
2448 { "LLCc?", 0xff00, 0x0300, OP_EQ, 2, S1_IPV4, 0, S1_CLNP,
2449 CL_REG, 0x000, 0, 0x0, 0x0000},
2450 { "IPV4?", 0xffff, 0x0800, OP_EQ, 1, S1_IPV4c, 0, S1_IPV6,
2451 LD_SAP, 0x100, 3, 0x0, 0xffff},
2452 { "IPV4 cont?", 0xff00, 0x4500, OP_EQ, 3, S1_IPV4F, 0, S1_CLNP,
2453 LD_SUM, 0x00a, 1, 0x0, 0x0000},
2454 { "IPV4 frag?", 0x3fff, 0x0000, OP_EQ, 1, S1_TCP44, 0, S1_CLNP,
2455 LD_LEN, 0x03e, 1, 0x0, 0xffff},
2456 { "TCP44?", 0x00ff, 0x0006, OP_EQ, 7, S1_TCPSQ, 0, S1_ESP4,
2457 LD_FID, 0x182, 1, 0x0, 0xffff}, /* FID IP4&TCP src+dst */
2458 { "IPV6?", 0xffff, 0x86dd, OP_EQ, 1, S1_IPV6L, 0, S1_CLNP,
2459 LD_SUM, 0x015, 1, 0x0, 0x0000},
2460 { "IPV6 len", 0xf000, 0x6000, OP_EQ, 0, S1_IPV6c, 0, S1_CLNP,
2461 IM_R1, 0x128, 1, 0x0, 0xffff},
2462 { "IPV6 cont?", 0x0000, 0x0000, OP_EQ, 3, S1_TCP64, 0, S1_CLNP,
2463 LD_FID, 0x484, 1, 0x0, 0xffff}, /* FID IP6&TCP src+dst */
2464 { "TCP64?",
2465#if 0
2466//@@@0xff00, 0x0600, OP_EQ, 18, S1_TCPSQ, 0, S1_ESP6, LD_LEN, 0x03f, 1, 0x0, 0xffff,
2467#endif
2468 0xff00, 0x0600, OP_EQ, 12, S1_TCPSQ, 0, S1_ESP6, LD_LEN,
2469 0x03f, 1, 0x0, 0xffff},
2470 { "TCP seq", /* 14:DADDR should point to dest port */
2471 0xFFFF, 0x0080, OP_EQ, 0, S2_HTTP, 0, S1_TCPFG, LD_SEQ,
2472 0x081, 3, 0x0, 0xffff}, /* Load TCP seq # */
2473 { "TCP control flags", 0xFFFF, 0x8080, OP_EQ, 0, S2_HTTP, 0,
2474 S1_TCPHL, ST_FLG, 0x145, 2, 0x0, 0x002f}, /* Load TCP flags */
2475 { "TCP length", 0x0000, 0x0000, OP_EQ, 0, S1_TCPHc, 0, S1_TCPHc,
2476 LD_R1, 0x205, 3, 0xB, 0xf000} ,
2477 { "TCP length cont", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0,
2478 S1_PCKT, LD_HDR, 0x0ff, 3, 0x0, 0xffff},
2479 { "Cleanup", 0x0000, 0x0000, OP_EQ, 0, S1_CLNP2, 0, S1_CLNP2,
2480 IM_CTL, 0x001, 3, 0x0, 0x0001},
2481 { "Cleanup 2", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0, S1_PCKT,
2482 CL_REG, 0x002, 3, 0x0, 0x0000},
2483 { "Drop packet", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0, S1_PCKT,
2484 IM_CTL, 0x080, 3, 0x0, 0xffff},
2485 { "No HTTP", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0, S1_PCKT,
2486 IM_CTL, 0x044, 3, 0x0, 0xffff},
2487 { "IPV4 ESP encrypted?", /* S1_ESP4 */
2488 0x00ff, 0x0032, OP_EQ, 0, S1_CLNP2, 0, S1_AH4, IM_CTL,
2489 0x021, 1, 0x0, 0xffff},
2490 { "IPV4 AH encrypted?", /* S1_AH4 */
2491 0x00ff, 0x0033, OP_EQ, 0, S1_CLNP2, 0, S1_CLNP, IM_CTL,
2492 0x021, 1, 0x0, 0xffff},
2493 { "IPV6 ESP encrypted?", /* S1_ESP6 */
2494#if 0
2495//@@@0x00ff, 0x0032, OP_EQ, 0, S1_CLNP2, 0, S1_AH6, IM_CTL, 0x021, 1, 0x0, 0xffff,
2496#endif
2497 0xff00, 0x3200, OP_EQ, 0, S1_CLNP2, 0, S1_AH6, IM_CTL,
2498 0x021, 1, 0x0, 0xffff},
2499 { "IPV6 AH encrypted?", /* S1_AH6 */
2500#if 0
2501//@@@0x00ff, 0x0033, OP_EQ, 0, S1_CLNP2, 0, S1_CLNP, IM_CTL, 0x021, 1, 0x0, 0xffff,
2502#endif
2503 0xff00, 0x3300, OP_EQ, 0, S1_CLNP2, 0, S1_CLNP, IM_CTL,
2504 0x021, 1, 0x0, 0xffff},
2505 { NULL },
2506};
2507#ifdef HP_ENCRYPT_DEFAULT
2508#define CAS_HP_FIRMWARE cas_prog_encryptiontab
2509#endif
2510#endif
2511
2512static cas_hp_inst_t cas_prog_null[] = { {NULL} };
2513#ifdef HP_NULL_DEFAULT
2514#define CAS_HP_FIRMWARE cas_prog_null
2515#endif
2516
2517/* firmware patch for NS_DP83065 */
2518typedef struct cas_saturn_patch {
2519 u16 addr;
2520 u16 val;
2521} cas_saturn_patch_t;
2522
2523#if 1
2524cas_saturn_patch_t cas_saturn_patch[] = {
2525{0x8200, 0x007e}, {0x8201, 0x0082}, {0x8202, 0x0009},
2526{0x8203, 0x0000}, {0x8204, 0x0000}, {0x8205, 0x0000},
2527{0x8206, 0x0000}, {0x8207, 0x0000}, {0x8208, 0x0000},
2528{0x8209, 0x008e}, {0x820a, 0x008e}, {0x820b, 0x00ff},
2529{0x820c, 0x00ce}, {0x820d, 0x0082}, {0x820e, 0x0025},
2530{0x820f, 0x00ff}, {0x8210, 0x0001}, {0x8211, 0x000f},
2531{0x8212, 0x00ce}, {0x8213, 0x0084}, {0x8214, 0x0026},
2532{0x8215, 0x00ff}, {0x8216, 0x0001}, {0x8217, 0x0011},
2533{0x8218, 0x00ce}, {0x8219, 0x0085}, {0x821a, 0x003d},
2534{0x821b, 0x00df}, {0x821c, 0x00e5}, {0x821d, 0x0086},
2535{0x821e, 0x0039}, {0x821f, 0x00b7}, {0x8220, 0x008f},
2536{0x8221, 0x00f8}, {0x8222, 0x007e}, {0x8223, 0x00c3},
2537{0x8224, 0x00c2}, {0x8225, 0x0096}, {0x8226, 0x0047},
2538{0x8227, 0x0084}, {0x8228, 0x00f3}, {0x8229, 0x008a},
2539{0x822a, 0x0000}, {0x822b, 0x0097}, {0x822c, 0x0047},
2540{0x822d, 0x00ce}, {0x822e, 0x0082}, {0x822f, 0x0033},
2541{0x8230, 0x00ff}, {0x8231, 0x0001}, {0x8232, 0x000f},
2542{0x8233, 0x0096}, {0x8234, 0x0046}, {0x8235, 0x0084},
2543{0x8236, 0x000c}, {0x8237, 0x0081}, {0x8238, 0x0004},
2544{0x8239, 0x0027}, {0x823a, 0x000b}, {0x823b, 0x0096},
2545{0x823c, 0x0046}, {0x823d, 0x0084}, {0x823e, 0x000c},
2546{0x823f, 0x0081}, {0x8240, 0x0008}, {0x8241, 0x0027},
2547{0x8242, 0x0057}, {0x8243, 0x007e}, {0x8244, 0x0084},
2548{0x8245, 0x0025}, {0x8246, 0x0096}, {0x8247, 0x0047},
2549{0x8248, 0x0084}, {0x8249, 0x00f3}, {0x824a, 0x008a},
2550{0x824b, 0x0004}, {0x824c, 0x0097}, {0x824d, 0x0047},
2551{0x824e, 0x00ce}, {0x824f, 0x0082}, {0x8250, 0x0054},
2552{0x8251, 0x00ff}, {0x8252, 0x0001}, {0x8253, 0x000f},
2553{0x8254, 0x0096}, {0x8255, 0x0046}, {0x8256, 0x0084},
2554{0x8257, 0x000c}, {0x8258, 0x0081}, {0x8259, 0x0004},
2555{0x825a, 0x0026}, {0x825b, 0x0038}, {0x825c, 0x00b6},
2556{0x825d, 0x0012}, {0x825e, 0x0020}, {0x825f, 0x0084},
2557{0x8260, 0x0020}, {0x8261, 0x0026}, {0x8262, 0x0003},
2558{0x8263, 0x007e}, {0x8264, 0x0084}, {0x8265, 0x0025},
2559{0x8266, 0x0096}, {0x8267, 0x007b}, {0x8268, 0x00d6},
2560{0x8269, 0x007c}, {0x826a, 0x00fe}, {0x826b, 0x008f},
2561{0x826c, 0x0056}, {0x826d, 0x00bd}, {0x826e, 0x00f7},
2562{0x826f, 0x00b6}, {0x8270, 0x00fe}, {0x8271, 0x008f},
2563{0x8272, 0x004e}, {0x8273, 0x00bd}, {0x8274, 0x00ec},
2564{0x8275, 0x008e}, {0x8276, 0x00bd}, {0x8277, 0x00fa},
2565{0x8278, 0x00f7}, {0x8279, 0x00bd}, {0x827a, 0x00f7},
2566{0x827b, 0x0028}, {0x827c, 0x00ce}, {0x827d, 0x0082},
2567{0x827e, 0x0082}, {0x827f, 0x00ff}, {0x8280, 0x0001},
2568{0x8281, 0x000f}, {0x8282, 0x0096}, {0x8283, 0x0046},
2569{0x8284, 0x0084}, {0x8285, 0x000c}, {0x8286, 0x0081},
2570{0x8287, 0x0004}, {0x8288, 0x0026}, {0x8289, 0x000a},
2571{0x828a, 0x00b6}, {0x828b, 0x0012}, {0x828c, 0x0020},
2572{0x828d, 0x0084}, {0x828e, 0x0020}, {0x828f, 0x0027},
2573{0x8290, 0x00b5}, {0x8291, 0x007e}, {0x8292, 0x0084},
2574{0x8293, 0x0025}, {0x8294, 0x00bd}, {0x8295, 0x00f7},
2575{0x8296, 0x001f}, {0x8297, 0x007e}, {0x8298, 0x0084},
2576{0x8299, 0x001f}, {0x829a, 0x0096}, {0x829b, 0x0047},
2577{0x829c, 0x0084}, {0x829d, 0x00f3}, {0x829e, 0x008a},
2578{0x829f, 0x0008}, {0x82a0, 0x0097}, {0x82a1, 0x0047},
2579{0x82a2, 0x00de}, {0x82a3, 0x00e1}, {0x82a4, 0x00ad},
2580{0x82a5, 0x0000}, {0x82a6, 0x00ce}, {0x82a7, 0x0082},
2581{0x82a8, 0x00af}, {0x82a9, 0x00ff}, {0x82aa, 0x0001},
2582{0x82ab, 0x000f}, {0x82ac, 0x007e}, {0x82ad, 0x0084},
2583{0x82ae, 0x0025}, {0x82af, 0x0096}, {0x82b0, 0x0041},
2584{0x82b1, 0x0085}, {0x82b2, 0x0010}, {0x82b3, 0x0026},
2585{0x82b4, 0x0006}, {0x82b5, 0x0096}, {0x82b6, 0x0023},
2586{0x82b7, 0x0085}, {0x82b8, 0x0040}, {0x82b9, 0x0027},
2587{0x82ba, 0x0006}, {0x82bb, 0x00bd}, {0x82bc, 0x00ed},
2588{0x82bd, 0x0000}, {0x82be, 0x007e}, {0x82bf, 0x0083},
2589{0x82c0, 0x00a2}, {0x82c1, 0x00de}, {0x82c2, 0x0042},
2590{0x82c3, 0x00bd}, {0x82c4, 0x00eb}, {0x82c5, 0x008e},
2591{0x82c6, 0x0096}, {0x82c7, 0x0024}, {0x82c8, 0x0084},
2592{0x82c9, 0x0008}, {0x82ca, 0x0027}, {0x82cb, 0x0003},
2593{0x82cc, 0x007e}, {0x82cd, 0x0083}, {0x82ce, 0x00df},
2594{0x82cf, 0x0096}, {0x82d0, 0x007b}, {0x82d1, 0x00d6},
2595{0x82d2, 0x007c}, {0x82d3, 0x00fe}, {0x82d4, 0x008f},
2596{0x82d5, 0x0056}, {0x82d6, 0x00bd}, {0x82d7, 0x00f7},
2597{0x82d8, 0x00b6}, {0x82d9, 0x00fe}, {0x82da, 0x008f},
2598{0x82db, 0x0050}, {0x82dc, 0x00bd}, {0x82dd, 0x00ec},
2599{0x82de, 0x008e}, {0x82df, 0x00bd}, {0x82e0, 0x00fa},
2600{0x82e1, 0x00f7}, {0x82e2, 0x0086}, {0x82e3, 0x0011},
2601{0x82e4, 0x00c6}, {0x82e5, 0x0049}, {0x82e6, 0x00bd},
2602{0x82e7, 0x00e4}, {0x82e8, 0x0012}, {0x82e9, 0x00ce},
2603{0x82ea, 0x0082}, {0x82eb, 0x00ef}, {0x82ec, 0x00ff},
2604{0x82ed, 0x0001}, {0x82ee, 0x000f}, {0x82ef, 0x0096},
2605{0x82f0, 0x0046}, {0x82f1, 0x0084}, {0x82f2, 0x000c},
2606{0x82f3, 0x0081}, {0x82f4, 0x0000}, {0x82f5, 0x0027},
2607{0x82f6, 0x0017}, {0x82f7, 0x00c6}, {0x82f8, 0x0049},
2608{0x82f9, 0x00bd}, {0x82fa, 0x00e4}, {0x82fb, 0x0091},
2609{0x82fc, 0x0024}, {0x82fd, 0x000d}, {0x82fe, 0x00b6},
2610{0x82ff, 0x0012}, {0x8300, 0x0020}, {0x8301, 0x0085},
2611{0x8302, 0x0020}, {0x8303, 0x0026}, {0x8304, 0x000c},
2612{0x8305, 0x00ce}, {0x8306, 0x0082}, {0x8307, 0x00c1},
2613{0x8308, 0x00ff}, {0x8309, 0x0001}, {0x830a, 0x000f},
2614{0x830b, 0x007e}, {0x830c, 0x0084}, {0x830d, 0x0025},
2615{0x830e, 0x007e}, {0x830f, 0x0084}, {0x8310, 0x0016},
2616{0x8311, 0x00fe}, {0x8312, 0x008f}, {0x8313, 0x0052},
2617{0x8314, 0x00bd}, {0x8315, 0x00ec}, {0x8316, 0x008e},
2618{0x8317, 0x00bd}, {0x8318, 0x00fa}, {0x8319, 0x00f7},
2619{0x831a, 0x0086}, {0x831b, 0x006a}, {0x831c, 0x00c6},
2620{0x831d, 0x0049}, {0x831e, 0x00bd}, {0x831f, 0x00e4},
2621{0x8320, 0x0012}, {0x8321, 0x00ce}, {0x8322, 0x0083},
2622{0x8323, 0x0027}, {0x8324, 0x00ff}, {0x8325, 0x0001},
2623{0x8326, 0x000f}, {0x8327, 0x0096}, {0x8328, 0x0046},
2624{0x8329, 0x0084}, {0x832a, 0x000c}, {0x832b, 0x0081},
2625{0x832c, 0x0000}, {0x832d, 0x0027}, {0x832e, 0x000a},
2626{0x832f, 0x00c6}, {0x8330, 0x0049}, {0x8331, 0x00bd},
2627{0x8332, 0x00e4}, {0x8333, 0x0091}, {0x8334, 0x0025},
2628{0x8335, 0x0006}, {0x8336, 0x007e}, {0x8337, 0x0084},
2629{0x8338, 0x0025}, {0x8339, 0x007e}, {0x833a, 0x0084},
2630{0x833b, 0x0016}, {0x833c, 0x00b6}, {0x833d, 0x0018},
2631{0x833e, 0x0070}, {0x833f, 0x00bb}, {0x8340, 0x0019},
2632{0x8341, 0x0070}, {0x8342, 0x002a}, {0x8343, 0x0004},
2633{0x8344, 0x0081}, {0x8345, 0x00af}, {0x8346, 0x002e},
2634{0x8347, 0x0019}, {0x8348, 0x0096}, {0x8349, 0x007b},
2635{0x834a, 0x00f6}, {0x834b, 0x0020}, {0x834c, 0x0007},
2636{0x834d, 0x00fa}, {0x834e, 0x0020}, {0x834f, 0x0027},
2637{0x8350, 0x00c4}, {0x8351, 0x0038}, {0x8352, 0x0081},
2638{0x8353, 0x0038}, {0x8354, 0x0027}, {0x8355, 0x000b},
2639{0x8356, 0x00f6}, {0x8357, 0x0020}, {0x8358, 0x0007},
2640{0x8359, 0x00fa}, {0x835a, 0x0020}, {0x835b, 0x0027},
2641{0x835c, 0x00cb}, {0x835d, 0x0008}, {0x835e, 0x007e},
2642{0x835f, 0x0082}, {0x8360, 0x00d3}, {0x8361, 0x00bd},
2643{0x8362, 0x00f7}, {0x8363, 0x0066}, {0x8364, 0x0086},
2644{0x8365, 0x0074}, {0x8366, 0x00c6}, {0x8367, 0x0049},
2645{0x8368, 0x00bd}, {0x8369, 0x00e4}, {0x836a, 0x0012},
2646{0x836b, 0x00ce}, {0x836c, 0x0083}, {0x836d, 0x0071},
2647{0x836e, 0x00ff}, {0x836f, 0x0001}, {0x8370, 0x000f},
2648{0x8371, 0x0096}, {0x8372, 0x0046}, {0x8373, 0x0084},
2649{0x8374, 0x000c}, {0x8375, 0x0081}, {0x8376, 0x0008},
2650{0x8377, 0x0026}, {0x8378, 0x000a}, {0x8379, 0x00c6},
2651{0x837a, 0x0049}, {0x837b, 0x00bd}, {0x837c, 0x00e4},
2652{0x837d, 0x0091}, {0x837e, 0x0025}, {0x837f, 0x0006},
2653{0x8380, 0x007e}, {0x8381, 0x0084}, {0x8382, 0x0025},
2654{0x8383, 0x007e}, {0x8384, 0x0084}, {0x8385, 0x0016},
2655{0x8386, 0x00bd}, {0x8387, 0x00f7}, {0x8388, 0x003e},
2656{0x8389, 0x0026}, {0x838a, 0x000e}, {0x838b, 0x00bd},
2657{0x838c, 0x00e5}, {0x838d, 0x0009}, {0x838e, 0x0026},
2658{0x838f, 0x0006}, {0x8390, 0x00ce}, {0x8391, 0x0082},
2659{0x8392, 0x00c1}, {0x8393, 0x00ff}, {0x8394, 0x0001},
2660{0x8395, 0x000f}, {0x8396, 0x007e}, {0x8397, 0x0084},
2661{0x8398, 0x0025}, {0x8399, 0x00fe}, {0x839a, 0x008f},
2662{0x839b, 0x0054}, {0x839c, 0x00bd}, {0x839d, 0x00ec},
2663{0x839e, 0x008e}, {0x839f, 0x00bd}, {0x83a0, 0x00fa},
2664{0x83a1, 0x00f7}, {0x83a2, 0x00bd}, {0x83a3, 0x00f7},
2665{0x83a4, 0x0033}, {0x83a5, 0x0086}, {0x83a6, 0x000f},
2666{0x83a7, 0x00c6}, {0x83a8, 0x0051}, {0x83a9, 0x00bd},
2667{0x83aa, 0x00e4}, {0x83ab, 0x0012}, {0x83ac, 0x00ce},
2668{0x83ad, 0x0083}, {0x83ae, 0x00b2}, {0x83af, 0x00ff},
2669{0x83b0, 0x0001}, {0x83b1, 0x000f}, {0x83b2, 0x0096},
2670{0x83b3, 0x0046}, {0x83b4, 0x0084}, {0x83b5, 0x000c},
2671{0x83b6, 0x0081}, {0x83b7, 0x0008}, {0x83b8, 0x0026},
2672{0x83b9, 0x005c}, {0x83ba, 0x00b6}, {0x83bb, 0x0012},
2673{0x83bc, 0x0020}, {0x83bd, 0x0084}, {0x83be, 0x003f},
2674{0x83bf, 0x0081}, {0x83c0, 0x003a}, {0x83c1, 0x0027},
2675{0x83c2, 0x001c}, {0x83c3, 0x0096}, {0x83c4, 0x0023},
2676{0x83c5, 0x0085}, {0x83c6, 0x0040}, {0x83c7, 0x0027},
2677{0x83c8, 0x0003}, {0x83c9, 0x007e}, {0x83ca, 0x0084},
2678{0x83cb, 0x0025}, {0x83cc, 0x00c6}, {0x83cd, 0x0051},
2679{0x83ce, 0x00bd}, {0x83cf, 0x00e4}, {0x83d0, 0x0091},
2680{0x83d1, 0x0025}, {0x83d2, 0x0003}, {0x83d3, 0x007e},
2681{0x83d4, 0x0084}, {0x83d5, 0x0025}, {0x83d6, 0x00ce},
2682{0x83d7, 0x0082}, {0x83d8, 0x00c1}, {0x83d9, 0x00ff},
2683{0x83da, 0x0001}, {0x83db, 0x000f}, {0x83dc, 0x007e},
2684{0x83dd, 0x0084}, {0x83de, 0x0025}, {0x83df, 0x00bd},
2685{0x83e0, 0x00f8}, {0x83e1, 0x0037}, {0x83e2, 0x007c},
2686{0x83e3, 0x0000}, {0x83e4, 0x007a}, {0x83e5, 0x00ce},
2687{0x83e6, 0x0083}, {0x83e7, 0x00ee}, {0x83e8, 0x00ff},
2688{0x83e9, 0x0001}, {0x83ea, 0x000f}, {0x83eb, 0x007e},
2689{0x83ec, 0x0084}, {0x83ed, 0x0025}, {0x83ee, 0x0096},
2690{0x83ef, 0x0046}, {0x83f0, 0x0084}, {0x83f1, 0x000c},
2691{0x83f2, 0x0081}, {0x83f3, 0x0008}, {0x83f4, 0x0026},
2692{0x83f5, 0x0020}, {0x83f6, 0x0096}, {0x83f7, 0x0024},
2693{0x83f8, 0x0084}, {0x83f9, 0x0008}, {0x83fa, 0x0026},
2694{0x83fb, 0x0029}, {0x83fc, 0x00b6}, {0x83fd, 0x0018},
2695{0x83fe, 0x0082}, {0x83ff, 0x00bb}, {0x8400, 0x0019},
2696{0x8401, 0x0082}, {0x8402, 0x00b1}, {0x8403, 0x0001},
2697{0x8404, 0x003b}, {0x8405, 0x0022}, {0x8406, 0x0009},
2698{0x8407, 0x00b6}, {0x8408, 0x0012}, {0x8409, 0x0020},
2699{0x840a, 0x0084}, {0x840b, 0x0037}, {0x840c, 0x0081},
2700{0x840d, 0x0032}, {0x840e, 0x0027}, {0x840f, 0x0015},
2701{0x8410, 0x00bd}, {0x8411, 0x00f8}, {0x8412, 0x0044},
2702{0x8413, 0x007e}, {0x8414, 0x0082}, {0x8415, 0x00c1},
2703{0x8416, 0x00bd}, {0x8417, 0x00f7}, {0x8418, 0x001f},
2704{0x8419, 0x00bd}, {0x841a, 0x00f8}, {0x841b, 0x0044},
2705{0x841c, 0x00bd}, {0x841d, 0x00fc}, {0x841e, 0x0029},
2706{0x841f, 0x00ce}, {0x8420, 0x0082}, {0x8421, 0x0025},
2707{0x8422, 0x00ff}, {0x8423, 0x0001}, {0x8424, 0x000f},
2708{0x8425, 0x0039}, {0x8426, 0x0096}, {0x8427, 0x0047},
2709{0x8428, 0x0084}, {0x8429, 0x00fc}, {0x842a, 0x008a},
2710{0x842b, 0x0000}, {0x842c, 0x0097}, {0x842d, 0x0047},
2711{0x842e, 0x00ce}, {0x842f, 0x0084}, {0x8430, 0x0034},
2712{0x8431, 0x00ff}, {0x8432, 0x0001}, {0x8433, 0x0011},
2713{0x8434, 0x0096}, {0x8435, 0x0046}, {0x8436, 0x0084},
2714{0x8437, 0x0003}, {0x8438, 0x0081}, {0x8439, 0x0002},
2715{0x843a, 0x0027}, {0x843b, 0x0003}, {0x843c, 0x007e},
2716{0x843d, 0x0085}, {0x843e, 0x001e}, {0x843f, 0x0096},
2717{0x8440, 0x0047}, {0x8441, 0x0084}, {0x8442, 0x00fc},
2718{0x8443, 0x008a}, {0x8444, 0x0002}, {0x8445, 0x0097},
2719{0x8446, 0x0047}, {0x8447, 0x00de}, {0x8448, 0x00e1},
2720{0x8449, 0x00ad}, {0x844a, 0x0000}, {0x844b, 0x0086},
2721{0x844c, 0x0001}, {0x844d, 0x00b7}, {0x844e, 0x0012},
2722{0x844f, 0x0051}, {0x8450, 0x00bd}, {0x8451, 0x00f7},
2723{0x8452, 0x0014}, {0x8453, 0x00b6}, {0x8454, 0x0010},
2724{0x8455, 0x0031}, {0x8456, 0x0084}, {0x8457, 0x00fd},
2725{0x8458, 0x00b7}, {0x8459, 0x0010}, {0x845a, 0x0031},
2726{0x845b, 0x00bd}, {0x845c, 0x00f8}, {0x845d, 0x001e},
2727{0x845e, 0x0096}, {0x845f, 0x0081}, {0x8460, 0x00d6},
2728{0x8461, 0x0082}, {0x8462, 0x00fe}, {0x8463, 0x008f},
2729{0x8464, 0x005a}, {0x8465, 0x00bd}, {0x8466, 0x00f7},
2730{0x8467, 0x00b6}, {0x8468, 0x00fe}, {0x8469, 0x008f},
2731{0x846a, 0x005c}, {0x846b, 0x00bd}, {0x846c, 0x00ec},
2732{0x846d, 0x008e}, {0x846e, 0x00bd}, {0x846f, 0x00fa},
2733{0x8470, 0x00f7}, {0x8471, 0x0086}, {0x8472, 0x0008},
2734{0x8473, 0x00d6}, {0x8474, 0x0000}, {0x8475, 0x00c5},
2735{0x8476, 0x0010}, {0x8477, 0x0026}, {0x8478, 0x0002},
2736{0x8479, 0x008b}, {0x847a, 0x0020}, {0x847b, 0x00c6},
2737{0x847c, 0x0051}, {0x847d, 0x00bd}, {0x847e, 0x00e4},
2738{0x847f, 0x0012}, {0x8480, 0x00ce}, {0x8481, 0x0084},
2739{0x8482, 0x0086}, {0x8483, 0x00ff}, {0x8484, 0x0001},
2740{0x8485, 0x0011}, {0x8486, 0x0096}, {0x8487, 0x0046},
2741{0x8488, 0x0084}, {0x8489, 0x0003}, {0x848a, 0x0081},
2742{0x848b, 0x0002}, {0x848c, 0x0027}, {0x848d, 0x0003},
2743{0x848e, 0x007e}, {0x848f, 0x0085}, {0x8490, 0x000f},
2744{0x8491, 0x00c6}, {0x8492, 0x0051}, {0x8493, 0x00bd},
2745{0x8494, 0x00e4}, {0x8495, 0x0091}, {0x8496, 0x0025},
2746{0x8497, 0x0003}, {0x8498, 0x007e}, {0x8499, 0x0085},
2747{0x849a, 0x001e}, {0x849b, 0x0096}, {0x849c, 0x0044},
2748{0x849d, 0x0085}, {0x849e, 0x0010}, {0x849f, 0x0026},
2749{0x84a0, 0x000a}, {0x84a1, 0x00b6}, {0x84a2, 0x0012},
2750{0x84a3, 0x0050}, {0x84a4, 0x00ba}, {0x84a5, 0x0001},
2751{0x84a6, 0x003c}, {0x84a7, 0x0085}, {0x84a8, 0x0010},
2752{0x84a9, 0x0027}, {0x84aa, 0x00a8}, {0x84ab, 0x00bd},
2753{0x84ac, 0x00f7}, {0x84ad, 0x0066}, {0x84ae, 0x00ce},
2754{0x84af, 0x0084}, {0x84b0, 0x00b7}, {0x84b1, 0x00ff},
2755{0x84b2, 0x0001}, {0x84b3, 0x0011}, {0x84b4, 0x007e},
2756{0x84b5, 0x0085}, {0x84b6, 0x001e}, {0x84b7, 0x0096},
2757{0x84b8, 0x0046}, {0x84b9, 0x0084}, {0x84ba, 0x0003},
2758{0x84bb, 0x0081}, {0x84bc, 0x0002}, {0x84bd, 0x0026},
2759{0x84be, 0x0050}, {0x84bf, 0x00b6}, {0x84c0, 0x0012},
2760{0x84c1, 0x0030}, {0x84c2, 0x0084}, {0x84c3, 0x0003},
2761{0x84c4, 0x0081}, {0x84c5, 0x0001}, {0x84c6, 0x0027},
2762{0x84c7, 0x0003}, {0x84c8, 0x007e}, {0x84c9, 0x0085},
2763{0x84ca, 0x001e}, {0x84cb, 0x0096}, {0x84cc, 0x0044},
2764{0x84cd, 0x0085}, {0x84ce, 0x0010}, {0x84cf, 0x0026},
2765{0x84d0, 0x0013}, {0x84d1, 0x00b6}, {0x84d2, 0x0012},
2766{0x84d3, 0x0050}, {0x84d4, 0x00ba}, {0x84d5, 0x0001},
2767{0x84d6, 0x003c}, {0x84d7, 0x0085}, {0x84d8, 0x0010},
2768{0x84d9, 0x0026}, {0x84da, 0x0009}, {0x84db, 0x00ce},
2769{0x84dc, 0x0084}, {0x84dd, 0x0053}, {0x84de, 0x00ff},
2770{0x84df, 0x0001}, {0x84e0, 0x0011}, {0x84e1, 0x007e},
2771{0x84e2, 0x0085}, {0x84e3, 0x001e}, {0x84e4, 0x00b6},
2772{0x84e5, 0x0010}, {0x84e6, 0x0031}, {0x84e7, 0x008a},
2773{0x84e8, 0x0002}, {0x84e9, 0x00b7}, {0x84ea, 0x0010},
2774{0x84eb, 0x0031}, {0x84ec, 0x00bd}, {0x84ed, 0x0085},
2775{0x84ee, 0x001f}, {0x84ef, 0x00bd}, {0x84f0, 0x00f8},
2776{0x84f1, 0x0037}, {0x84f2, 0x007c}, {0x84f3, 0x0000},
2777{0x84f4, 0x0080}, {0x84f5, 0x00ce}, {0x84f6, 0x0084},
2778{0x84f7, 0x00fe}, {0x84f8, 0x00ff}, {0x84f9, 0x0001},
2779{0x84fa, 0x0011}, {0x84fb, 0x007e}, {0x84fc, 0x0085},
2780{0x84fd, 0x001e}, {0x84fe, 0x0096}, {0x84ff, 0x0046},
2781{0x8500, 0x0084}, {0x8501, 0x0003}, {0x8502, 0x0081},
2782{0x8503, 0x0002}, {0x8504, 0x0026}, {0x8505, 0x0009},
2783{0x8506, 0x00b6}, {0x8507, 0x0012}, {0x8508, 0x0030},
2784{0x8509, 0x0084}, {0x850a, 0x0003}, {0x850b, 0x0081},
2785{0x850c, 0x0001}, {0x850d, 0x0027}, {0x850e, 0x000f},
2786{0x850f, 0x00bd}, {0x8510, 0x00f8}, {0x8511, 0x0044},
2787{0x8512, 0x00bd}, {0x8513, 0x00f7}, {0x8514, 0x000b},
2788{0x8515, 0x00bd}, {0x8516, 0x00fc}, {0x8517, 0x0029},
2789{0x8518, 0x00ce}, {0x8519, 0x0084}, {0x851a, 0x0026},
2790{0x851b, 0x00ff}, {0x851c, 0x0001}, {0x851d, 0x0011},
2791{0x851e, 0x0039}, {0x851f, 0x00d6}, {0x8520, 0x0022},
2792{0x8521, 0x00c4}, {0x8522, 0x000f}, {0x8523, 0x00b6},
2793{0x8524, 0x0012}, {0x8525, 0x0030}, {0x8526, 0x00ba},
2794{0x8527, 0x0012}, {0x8528, 0x0032}, {0x8529, 0x0084},
2795{0x852a, 0x0004}, {0x852b, 0x0027}, {0x852c, 0x000d},
2796{0x852d, 0x0096}, {0x852e, 0x0022}, {0x852f, 0x0085},
2797{0x8530, 0x0004}, {0x8531, 0x0027}, {0x8532, 0x0005},
2798{0x8533, 0x00ca}, {0x8534, 0x0010}, {0x8535, 0x007e},
2799{0x8536, 0x0085}, {0x8537, 0x003a}, {0x8538, 0x00ca},
2800{0x8539, 0x0020}, {0x853a, 0x00d7}, {0x853b, 0x0022},
2801{0x853c, 0x0039}, {0x853d, 0x0086}, {0x853e, 0x0000},
2802{0x853f, 0x0097}, {0x8540, 0x0083}, {0x8541, 0x0018},
2803{0x8542, 0x00ce}, {0x8543, 0x001c}, {0x8544, 0x0000},
2804{0x8545, 0x00bd}, {0x8546, 0x00eb}, {0x8547, 0x0046},
2805{0x8548, 0x0096}, {0x8549, 0x0057}, {0x854a, 0x0085},
2806{0x854b, 0x0001}, {0x854c, 0x0027}, {0x854d, 0x0002},
2807{0x854e, 0x004f}, {0x854f, 0x0039}, {0x8550, 0x0085},
2808{0x8551, 0x0002}, {0x8552, 0x0027}, {0x8553, 0x0001},
2809{0x8554, 0x0039}, {0x8555, 0x007f}, {0x8556, 0x008f},
2810{0x8557, 0x007d}, {0x8558, 0x0086}, {0x8559, 0x0004},
2811{0x855a, 0x00b7}, {0x855b, 0x0012}, {0x855c, 0x0004},
2812{0x855d, 0x0086}, {0x855e, 0x0008}, {0x855f, 0x00b7},
2813{0x8560, 0x0012}, {0x8561, 0x0007}, {0x8562, 0x0086},
2814{0x8563, 0x0010}, {0x8564, 0x00b7}, {0x8565, 0x0012},
2815{0x8566, 0x000c}, {0x8567, 0x0086}, {0x8568, 0x0007},
2816{0x8569, 0x00b7}, {0x856a, 0x0012}, {0x856b, 0x0006},
2817{0x856c, 0x00b6}, {0x856d, 0x008f}, {0x856e, 0x007d},
2818{0x856f, 0x00b7}, {0x8570, 0x0012}, {0x8571, 0x0070},
2819{0x8572, 0x0086}, {0x8573, 0x0001}, {0x8574, 0x00ba},
2820{0x8575, 0x0012}, {0x8576, 0x0004}, {0x8577, 0x00b7},
2821{0x8578, 0x0012}, {0x8579, 0x0004}, {0x857a, 0x0001},
2822{0x857b, 0x0001}, {0x857c, 0x0001}, {0x857d, 0x0001},
2823{0x857e, 0x0001}, {0x857f, 0x0001}, {0x8580, 0x00b6},
2824{0x8581, 0x0012}, {0x8582, 0x0004}, {0x8583, 0x0084},
2825{0x8584, 0x00fe}, {0x8585, 0x008a}, {0x8586, 0x0002},
2826{0x8587, 0x00b7}, {0x8588, 0x0012}, {0x8589, 0x0004},
2827{0x858a, 0x0001}, {0x858b, 0x0001}, {0x858c, 0x0001},
2828{0x858d, 0x0001}, {0x858e, 0x0001}, {0x858f, 0x0001},
2829{0x8590, 0x0086}, {0x8591, 0x00fd}, {0x8592, 0x00b4},
2830{0x8593, 0x0012}, {0x8594, 0x0004}, {0x8595, 0x00b7},
2831{0x8596, 0x0012}, {0x8597, 0x0004}, {0x8598, 0x00b6},
2832{0x8599, 0x0012}, {0x859a, 0x0000}, {0x859b, 0x0084},
2833{0x859c, 0x0008}, {0x859d, 0x0081}, {0x859e, 0x0008},
2834{0x859f, 0x0027}, {0x85a0, 0x0016}, {0x85a1, 0x00b6},
2835{0x85a2, 0x008f}, {0x85a3, 0x007d}, {0x85a4, 0x0081},
2836{0x85a5, 0x000c}, {0x85a6, 0x0027}, {0x85a7, 0x0008},
2837{0x85a8, 0x008b}, {0x85a9, 0x0004}, {0x85aa, 0x00b7},
2838{0x85ab, 0x008f}, {0x85ac, 0x007d}, {0x85ad, 0x007e},
2839{0x85ae, 0x0085}, {0x85af, 0x006c}, {0x85b0, 0x0086},
2840{0x85b1, 0x0003}, {0x85b2, 0x0097}, {0x85b3, 0x0040},
2841{0x85b4, 0x007e}, {0x85b5, 0x0089}, {0x85b6, 0x006e},
2842{0x85b7, 0x0086}, {0x85b8, 0x0007}, {0x85b9, 0x00b7},
2843{0x85ba, 0x0012}, {0x85bb, 0x0006}, {0x85bc, 0x005f},
2844{0x85bd, 0x00f7}, {0x85be, 0x008f}, {0x85bf, 0x0082},
2845{0x85c0, 0x005f}, {0x85c1, 0x00f7}, {0x85c2, 0x008f},
2846{0x85c3, 0x007f}, {0x85c4, 0x00f7}, {0x85c5, 0x008f},
2847{0x85c6, 0x0070}, {0x85c7, 0x00f7}, {0x85c8, 0x008f},
2848{0x85c9, 0x0071}, {0x85ca, 0x00f7}, {0x85cb, 0x008f},
2849{0x85cc, 0x0072}, {0x85cd, 0x00f7}, {0x85ce, 0x008f},
2850{0x85cf, 0x0073}, {0x85d0, 0x00f7}, {0x85d1, 0x008f},
2851{0x85d2, 0x0074}, {0x85d3, 0x00f7}, {0x85d4, 0x008f},
2852{0x85d5, 0x0075}, {0x85d6, 0x00f7}, {0x85d7, 0x008f},
2853{0x85d8, 0x0076}, {0x85d9, 0x00f7}, {0x85da, 0x008f},
2854{0x85db, 0x0077}, {0x85dc, 0x00f7}, {0x85dd, 0x008f},
2855{0x85de, 0x0078}, {0x85df, 0x00f7}, {0x85e0, 0x008f},
2856{0x85e1, 0x0079}, {0x85e2, 0x00f7}, {0x85e3, 0x008f},
2857{0x85e4, 0x007a}, {0x85e5, 0x00f7}, {0x85e6, 0x008f},
2858{0x85e7, 0x007b}, {0x85e8, 0x00b6}, {0x85e9, 0x0012},
2859{0x85ea, 0x0004}, {0x85eb, 0x008a}, {0x85ec, 0x0010},
2860{0x85ed, 0x00b7}, {0x85ee, 0x0012}, {0x85ef, 0x0004},
2861{0x85f0, 0x0086}, {0x85f1, 0x00e4}, {0x85f2, 0x00b7},
2862{0x85f3, 0x0012}, {0x85f4, 0x0070}, {0x85f5, 0x00b7},
2863{0x85f6, 0x0012}, {0x85f7, 0x0007}, {0x85f8, 0x00f7},
2864{0x85f9, 0x0012}, {0x85fa, 0x0005}, {0x85fb, 0x00f7},
2865{0x85fc, 0x0012}, {0x85fd, 0x0009}, {0x85fe, 0x0086},
2866{0x85ff, 0x0008}, {0x8600, 0x00ba}, {0x8601, 0x0012},
2867{0x8602, 0x0004}, {0x8603, 0x00b7}, {0x8604, 0x0012},
2868{0x8605, 0x0004}, {0x8606, 0x0086}, {0x8607, 0x00f7},
2869{0x8608, 0x00b4}, {0x8609, 0x0012}, {0x860a, 0x0004},
2870{0x860b, 0x00b7}, {0x860c, 0x0012}, {0x860d, 0x0004},
2871{0x860e, 0x0001}, {0x860f, 0x0001}, {0x8610, 0x0001},
2872{0x8611, 0x0001}, {0x8612, 0x0001}, {0x8613, 0x0001},
2873{0x8614, 0x00b6}, {0x8615, 0x0012}, {0x8616, 0x0008},
2874{0x8617, 0x0027}, {0x8618, 0x007f}, {0x8619, 0x0081},
2875{0x861a, 0x0080}, {0x861b, 0x0026}, {0x861c, 0x000b},
2876{0x861d, 0x0086}, {0x861e, 0x0008}, {0x861f, 0x00ce},
2877{0x8620, 0x008f}, {0x8621, 0x0079}, {0x8622, 0x00bd},
2878{0x8623, 0x0089}, {0x8624, 0x007b}, {0x8625, 0x007e},
2879{0x8626, 0x0086}, {0x8627, 0x008e}, {0x8628, 0x0081},
2880{0x8629, 0x0040}, {0x862a, 0x0026}, {0x862b, 0x000b},
2881{0x862c, 0x0086}, {0x862d, 0x0004}, {0x862e, 0x00ce},
2882{0x862f, 0x008f}, {0x8630, 0x0076}, {0x8631, 0x00bd},
2883{0x8632, 0x0089}, {0x8633, 0x007b}, {0x8634, 0x007e},
2884{0x8635, 0x0086}, {0x8636, 0x008e}, {0x8637, 0x0081},
2885{0x8638, 0x0020}, {0x8639, 0x0026}, {0x863a, 0x000b},
2886{0x863b, 0x0086}, {0x863c, 0x0002}, {0x863d, 0x00ce},
2887{0x863e, 0x008f}, {0x863f, 0x0073}, {0x8640, 0x00bd},
2888{0x8641, 0x0089}, {0x8642, 0x007b}, {0x8643, 0x007e},
2889{0x8644, 0x0086}, {0x8645, 0x008e}, {0x8646, 0x0081},
2890{0x8647, 0x0010}, {0x8648, 0x0026}, {0x8649, 0x000b},
2891{0x864a, 0x0086}, {0x864b, 0x0001}, {0x864c, 0x00ce},
2892{0x864d, 0x008f}, {0x864e, 0x0070}, {0x864f, 0x00bd},
2893{0x8650, 0x0089}, {0x8651, 0x007b}, {0x8652, 0x007e},
2894{0x8653, 0x0086}, {0x8654, 0x008e}, {0x8655, 0x0081},
2895{0x8656, 0x0008}, {0x8657, 0x0026}, {0x8658, 0x000b},
2896{0x8659, 0x0086}, {0x865a, 0x0008}, {0x865b, 0x00ce},
2897{0x865c, 0x008f}, {0x865d, 0x0079}, {0x865e, 0x00bd},
2898{0x865f, 0x0089}, {0x8660, 0x007f}, {0x8661, 0x007e},
2899{0x8662, 0x0086}, {0x8663, 0x008e}, {0x8664, 0x0081},
2900{0x8665, 0x0004}, {0x8666, 0x0026}, {0x8667, 0x000b},
2901{0x8668, 0x0086}, {0x8669, 0x0004}, {0x866a, 0x00ce},
2902{0x866b, 0x008f}, {0x866c, 0x0076}, {0x866d, 0x00bd},
2903{0x866e, 0x0089}, {0x866f, 0x007f}, {0x8670, 0x007e},
2904{0x8671, 0x0086}, {0x8672, 0x008e}, {0x8673, 0x0081},
2905{0x8674, 0x0002}, {0x8675, 0x0026}, {0x8676, 0x000b},
2906{0x8677, 0x008a}, {0x8678, 0x0002}, {0x8679, 0x00ce},
2907{0x867a, 0x008f}, {0x867b, 0x0073}, {0x867c, 0x00bd},
2908{0x867d, 0x0089}, {0x867e, 0x007f}, {0x867f, 0x007e},
2909{0x8680, 0x0086}, {0x8681, 0x008e}, {0x8682, 0x0081},
2910{0x8683, 0x0001}, {0x8684, 0x0026}, {0x8685, 0x0008},
2911{0x8686, 0x0086}, {0x8687, 0x0001}, {0x8688, 0x00ce},
2912{0x8689, 0x008f}, {0x868a, 0x0070}, {0x868b, 0x00bd},
2913{0x868c, 0x0089}, {0x868d, 0x007f}, {0x868e, 0x00b6},
2914{0x868f, 0x008f}, {0x8690, 0x007f}, {0x8691, 0x0081},
2915{0x8692, 0x000f}, {0x8693, 0x0026}, {0x8694, 0x0003},
2916{0x8695, 0x007e}, {0x8696, 0x0087}, {0x8697, 0x0047},
2917{0x8698, 0x00b6}, {0x8699, 0x0012}, {0x869a, 0x0009},
2918{0x869b, 0x0084}, {0x869c, 0x0003}, {0x869d, 0x0081},
2919{0x869e, 0x0003}, {0x869f, 0x0027}, {0x86a0, 0x0006},
2920{0x86a1, 0x007c}, {0x86a2, 0x0012}, {0x86a3, 0x0009},
2921{0x86a4, 0x007e}, {0x86a5, 0x0085}, {0x86a6, 0x00fe},
2922{0x86a7, 0x00b6}, {0x86a8, 0x0012}, {0x86a9, 0x0006},
2923{0x86aa, 0x0084}, {0x86ab, 0x0007}, {0x86ac, 0x0081},
2924{0x86ad, 0x0007}, {0x86ae, 0x0027}, {0x86af, 0x0008},
2925{0x86b0, 0x008b}, {0x86b1, 0x0001}, {0x86b2, 0x00b7},
2926{0x86b3, 0x0012}, {0x86b4, 0x0006}, {0x86b5, 0x007e},
2927{0x86b6, 0x0086}, {0x86b7, 0x00d5}, {0x86b8, 0x00b6},
2928{0x86b9, 0x008f}, {0x86ba, 0x0082}, {0x86bb, 0x0026},
2929{0x86bc, 0x000a}, {0x86bd, 0x007c}, {0x86be, 0x008f},
2930{0x86bf, 0x0082}, {0x86c0, 0x004f}, {0x86c1, 0x00b7},
2931{0x86c2, 0x0012}, {0x86c3, 0x0006}, {0x86c4, 0x007e},
2932{0x86c5, 0x0085}, {0x86c6, 0x00c0}, {0x86c7, 0x00b6},
2933{0x86c8, 0x0012}, {0x86c9, 0x0006}, {0x86ca, 0x0084},
2934{0x86cb, 0x003f}, {0x86cc, 0x0081}, {0x86cd, 0x003f},
2935{0x86ce, 0x0027}, {0x86cf, 0x0010}, {0x86d0, 0x008b},
2936{0x86d1, 0x0008}, {0x86d2, 0x00b7}, {0x86d3, 0x0012},
2937{0x86d4, 0x0006}, {0x86d5, 0x00b6}, {0x86d6, 0x0012},
2938{0x86d7, 0x0009}, {0x86d8, 0x0084}, {0x86d9, 0x00fc},
2939{0x86da, 0x00b7}, {0x86db, 0x0012}, {0x86dc, 0x0009},
2940{0x86dd, 0x007e}, {0x86de, 0x0085}, {0x86df, 0x00fe},
2941{0x86e0, 0x00ce}, {0x86e1, 0x008f}, {0x86e2, 0x0070},
2942{0x86e3, 0x0018}, {0x86e4, 0x00ce}, {0x86e5, 0x008f},
2943{0x86e6, 0x0084}, {0x86e7, 0x00c6}, {0x86e8, 0x000c},
2944{0x86e9, 0x00bd}, {0x86ea, 0x0089}, {0x86eb, 0x006f},
2945{0x86ec, 0x00ce}, {0x86ed, 0x008f}, {0x86ee, 0x0084},
2946{0x86ef, 0x0018}, {0x86f0, 0x00ce}, {0x86f1, 0x008f},
2947{0x86f2, 0x0070}, {0x86f3, 0x00c6}, {0x86f4, 0x000c},
2948{0x86f5, 0x00bd}, {0x86f6, 0x0089}, {0x86f7, 0x006f},
2949{0x86f8, 0x00d6}, {0x86f9, 0x0083}, {0x86fa, 0x00c1},
2950{0x86fb, 0x004f}, {0x86fc, 0x002d}, {0x86fd, 0x0003},
2951{0x86fe, 0x007e}, {0x86ff, 0x0087}, {0x8700, 0x0040},
2952{0x8701, 0x00b6}, {0x8702, 0x008f}, {0x8703, 0x007f},
2953{0x8704, 0x0081}, {0x8705, 0x0007}, {0x8706, 0x0027},
2954{0x8707, 0x000f}, {0x8708, 0x0081}, {0x8709, 0x000b},
2955{0x870a, 0x0027}, {0x870b, 0x0015}, {0x870c, 0x0081},
2956{0x870d, 0x000d}, {0x870e, 0x0027}, {0x870f, 0x001b},
2957{0x8710, 0x0081}, {0x8711, 0x000e}, {0x8712, 0x0027},
2958{0x8713, 0x0021}, {0x8714, 0x007e}, {0x8715, 0x0087},
2959{0x8716, 0x0040}, {0x8717, 0x00f7}, {0x8718, 0x008f},
2960{0x8719, 0x007b}, {0x871a, 0x0086}, {0x871b, 0x0002},
2961{0x871c, 0x00b7}, {0x871d, 0x008f}, {0x871e, 0x007a},
2962{0x871f, 0x0020}, {0x8720, 0x001c}, {0x8721, 0x00f7},
2963{0x8722, 0x008f}, {0x8723, 0x0078}, {0x8724, 0x0086},
2964{0x8725, 0x0002}, {0x8726, 0x00b7}, {0x8727, 0x008f},
2965{0x8728, 0x0077}, {0x8729, 0x0020}, {0x872a, 0x0012},
2966{0x872b, 0x00f7}, {0x872c, 0x008f}, {0x872d, 0x0075},
2967{0x872e, 0x0086}, {0x872f, 0x0002}, {0x8730, 0x00b7},
2968{0x8731, 0x008f}, {0x8732, 0x0074}, {0x8733, 0x0020},
2969{0x8734, 0x0008}, {0x8735, 0x00f7}, {0x8736, 0x008f},
2970{0x8737, 0x0072}, {0x8738, 0x0086}, {0x8739, 0x0002},
2971{0x873a, 0x00b7}, {0x873b, 0x008f}, {0x873c, 0x0071},
2972{0x873d, 0x007e}, {0x873e, 0x0087}, {0x873f, 0x0047},
2973{0x8740, 0x0086}, {0x8741, 0x0004}, {0x8742, 0x0097},
2974{0x8743, 0x0040}, {0x8744, 0x007e}, {0x8745, 0x0089},
2975{0x8746, 0x006e}, {0x8747, 0x00ce}, {0x8748, 0x008f},
2976{0x8749, 0x0072}, {0x874a, 0x00bd}, {0x874b, 0x0089},
2977{0x874c, 0x00f7}, {0x874d, 0x00ce}, {0x874e, 0x008f},
2978{0x874f, 0x0075}, {0x8750, 0x00bd}, {0x8751, 0x0089},
2979{0x8752, 0x00f7}, {0x8753, 0x00ce}, {0x8754, 0x008f},
2980{0x8755, 0x0078}, {0x8756, 0x00bd}, {0x8757, 0x0089},
2981{0x8758, 0x00f7}, {0x8759, 0x00ce}, {0x875a, 0x008f},
2982{0x875b, 0x007b}, {0x875c, 0x00bd}, {0x875d, 0x0089},
2983{0x875e, 0x00f7}, {0x875f, 0x004f}, {0x8760, 0x00b7},
2984{0x8761, 0x008f}, {0x8762, 0x007d}, {0x8763, 0x00b7},
2985{0x8764, 0x008f}, {0x8765, 0x0081}, {0x8766, 0x00b6},
2986{0x8767, 0x008f}, {0x8768, 0x0072}, {0x8769, 0x0027},
2987{0x876a, 0x0047}, {0x876b, 0x007c}, {0x876c, 0x008f},
2988{0x876d, 0x007d}, {0x876e, 0x00b6}, {0x876f, 0x008f},
2989{0x8770, 0x0075}, {0x8771, 0x0027}, {0x8772, 0x003f},
2990{0x8773, 0x007c}, {0x8774, 0x008f}, {0x8775, 0x007d},
2991{0x8776, 0x00b6}, {0x8777, 0x008f}, {0x8778, 0x0078},
2992{0x8779, 0x0027}, {0x877a, 0x0037}, {0x877b, 0x007c},
2993{0x877c, 0x008f}, {0x877d, 0x007d}, {0x877e, 0x00b6},
2994{0x877f, 0x008f}, {0x8780, 0x007b}, {0x8781, 0x0027},
2995{0x8782, 0x002f}, {0x8783, 0x007f}, {0x8784, 0x008f},
2996{0x8785, 0x007d}, {0x8786, 0x007c}, {0x8787, 0x008f},
2997{0x8788, 0x0081}, {0x8789, 0x007a}, {0x878a, 0x008f},
2998{0x878b, 0x0072}, {0x878c, 0x0027}, {0x878d, 0x001b},
2999{0x878e, 0x007c}, {0x878f, 0x008f}, {0x8790, 0x007d},
3000{0x8791, 0x007a}, {0x8792, 0x008f}, {0x8793, 0x0075},
3001{0x8794, 0x0027}, {0x8795, 0x0016}, {0x8796, 0x007c},
3002{0x8797, 0x008f}, {0x8798, 0x007d}, {0x8799, 0x007a},
3003{0x879a, 0x008f}, {0x879b, 0x0078}, {0x879c, 0x0027},
3004{0x879d, 0x0011}, {0x879e, 0x007c}, {0x879f, 0x008f},
3005{0x87a0, 0x007d}, {0x87a1, 0x007a}, {0x87a2, 0x008f},
3006{0x87a3, 0x007b}, {0x87a4, 0x0027}, {0x87a5, 0x000c},
3007{0x87a6, 0x007e}, {0x87a7, 0x0087}, {0x87a8, 0x0083},
3008{0x87a9, 0x007a}, {0x87aa, 0x008f}, {0x87ab, 0x0075},
3009{0x87ac, 0x007a}, {0x87ad, 0x008f}, {0x87ae, 0x0078},
3010{0x87af, 0x007a}, {0x87b0, 0x008f}, {0x87b1, 0x007b},
3011{0x87b2, 0x00ce}, {0x87b3, 0x00c1}, {0x87b4, 0x00fc},
3012{0x87b5, 0x00f6}, {0x87b6, 0x008f}, {0x87b7, 0x007d},
3013{0x87b8, 0x003a}, {0x87b9, 0x00a6}, {0x87ba, 0x0000},
3014{0x87bb, 0x00b7}, {0x87bc, 0x0012}, {0x87bd, 0x0070},
3015{0x87be, 0x00b6}, {0x87bf, 0x008f}, {0x87c0, 0x0072},
3016{0x87c1, 0x0026}, {0x87c2, 0x0003}, {0x87c3, 0x007e},
3017{0x87c4, 0x0087}, {0x87c5, 0x00fa}, {0x87c6, 0x00b6},
3018{0x87c7, 0x008f}, {0x87c8, 0x0075}, {0x87c9, 0x0026},
3019{0x87ca, 0x000a}, {0x87cb, 0x0018}, {0x87cc, 0x00ce},
3020{0x87cd, 0x008f}, {0x87ce, 0x0073}, {0x87cf, 0x00bd},
3021{0x87d0, 0x0089}, {0x87d1, 0x00d5}, {0x87d2, 0x007e},
3022{0x87d3, 0x0087}, {0x87d4, 0x00fa}, {0x87d5, 0x00b6},
3023{0x87d6, 0x008f}, {0x87d7, 0x0078}, {0x87d8, 0x0026},
3024{0x87d9, 0x000a}, {0x87da, 0x0018}, {0x87db, 0x00ce},
3025{0x87dc, 0x008f}, {0x87dd, 0x0076}, {0x87de, 0x00bd},
3026{0x87df, 0x0089}, {0x87e0, 0x00d5}, {0x87e1, 0x007e},
3027{0x87e2, 0x0087}, {0x87e3, 0x00fa}, {0x87e4, 0x00b6},
3028{0x87e5, 0x008f}, {0x87e6, 0x007b}, {0x87e7, 0x0026},
3029{0x87e8, 0x000a}, {0x87e9, 0x0018}, {0x87ea, 0x00ce},
3030{0x87eb, 0x008f}, {0x87ec, 0x0079}, {0x87ed, 0x00bd},
3031{0x87ee, 0x0089}, {0x87ef, 0x00d5}, {0x87f0, 0x007e},
3032{0x87f1, 0x0087}, {0x87f2, 0x00fa}, {0x87f3, 0x0086},
3033{0x87f4, 0x0005}, {0x87f5, 0x0097}, {0x87f6, 0x0040},
3034{0x87f7, 0x007e}, {0x87f8, 0x0089}, {0x87f9, 0x0000},
3035{0x87fa, 0x00b6}, {0x87fb, 0x008f}, {0x87fc, 0x0075},
3036{0x87fd, 0x0081}, {0x87fe, 0x0007}, {0x87ff, 0x002e},
3037{0x8800, 0x00f2}, {0x8801, 0x00f6}, {0x8802, 0x0012},
3038{0x8803, 0x0006}, {0x8804, 0x00c4}, {0x8805, 0x00f8},
3039{0x8806, 0x001b}, {0x8807, 0x00b7}, {0x8808, 0x0012},
3040{0x8809, 0x0006}, {0x880a, 0x00b6}, {0x880b, 0x008f},
3041{0x880c, 0x0078}, {0x880d, 0x0081}, {0x880e, 0x0007},
3042{0x880f, 0x002e}, {0x8810, 0x00e2}, {0x8811, 0x0048},
3043{0x8812, 0x0048}, {0x8813, 0x0048}, {0x8814, 0x00f6},
3044{0x8815, 0x0012}, {0x8816, 0x0006}, {0x8817, 0x00c4},
3045{0x8818, 0x00c7}, {0x8819, 0x001b}, {0x881a, 0x00b7},
3046{0x881b, 0x0012}, {0x881c, 0x0006}, {0x881d, 0x00b6},
3047{0x881e, 0x008f}, {0x881f, 0x007b}, {0x8820, 0x0081},
3048{0x8821, 0x0007}, {0x8822, 0x002e}, {0x8823, 0x00cf},
3049{0x8824, 0x00f6}, {0x8825, 0x0012}, {0x8826, 0x0005},
3050{0x8827, 0x00c4}, {0x8828, 0x00f8}, {0x8829, 0x001b},
3051{0x882a, 0x00b7}, {0x882b, 0x0012}, {0x882c, 0x0005},
3052{0x882d, 0x0086}, {0x882e, 0x0000}, {0x882f, 0x00f6},
3053{0x8830, 0x008f}, {0x8831, 0x0071}, {0x8832, 0x00bd},
3054{0x8833, 0x0089}, {0x8834, 0x0094}, {0x8835, 0x0086},
3055{0x8836, 0x0001}, {0x8837, 0x00f6}, {0x8838, 0x008f},
3056{0x8839, 0x0074}, {0x883a, 0x00bd}, {0x883b, 0x0089},
3057{0x883c, 0x0094}, {0x883d, 0x0086}, {0x883e, 0x0002},
3058{0x883f, 0x00f6}, {0x8840, 0x008f}, {0x8841, 0x0077},
3059{0x8842, 0x00bd}, {0x8843, 0x0089}, {0x8844, 0x0094},
3060{0x8845, 0x0086}, {0x8846, 0x0003}, {0x8847, 0x00f6},
3061{0x8848, 0x008f}, {0x8849, 0x007a}, {0x884a, 0x00bd},
3062{0x884b, 0x0089}, {0x884c, 0x0094}, {0x884d, 0x00ce},
3063{0x884e, 0x008f}, {0x884f, 0x0070}, {0x8850, 0x00a6},
3064{0x8851, 0x0001}, {0x8852, 0x0081}, {0x8853, 0x0001},
3065{0x8854, 0x0027}, {0x8855, 0x0007}, {0x8856, 0x0081},
3066{0x8857, 0x0003}, {0x8858, 0x0027}, {0x8859, 0x0003},
3067{0x885a, 0x007e}, {0x885b, 0x0088}, {0x885c, 0x0066},
3068{0x885d, 0x00a6}, {0x885e, 0x0000}, {0x885f, 0x00b8},
3069{0x8860, 0x008f}, {0x8861, 0x0081}, {0x8862, 0x0084},
3070{0x8863, 0x0001}, {0x8864, 0x0026}, {0x8865, 0x000b},
3071{0x8866, 0x008c}, {0x8867, 0x008f}, {0x8868, 0x0079},
3072{0x8869, 0x002c}, {0x886a, 0x000e}, {0x886b, 0x0008},
3073{0x886c, 0x0008}, {0x886d, 0x0008}, {0x886e, 0x007e},
3074{0x886f, 0x0088}, {0x8870, 0x0050}, {0x8871, 0x00b6},
3075{0x8872, 0x0012}, {0x8873, 0x0004}, {0x8874, 0x008a},
3076{0x8875, 0x0040}, {0x8876, 0x00b7}, {0x8877, 0x0012},
3077{0x8878, 0x0004}, {0x8879, 0x00b6}, {0x887a, 0x0012},
3078{0x887b, 0x0004}, {0x887c, 0x0084}, {0x887d, 0x00fb},
3079{0x887e, 0x0084}, {0x887f, 0x00ef}, {0x8880, 0x00b7},
3080{0x8881, 0x0012}, {0x8882, 0x0004}, {0x8883, 0x00b6},
3081{0x8884, 0x0012}, {0x8885, 0x0007}, {0x8886, 0x0036},
3082{0x8887, 0x00b6}, {0x8888, 0x008f}, {0x8889, 0x007c},
3083{0x888a, 0x0048}, {0x888b, 0x0048}, {0x888c, 0x00b7},
3084{0x888d, 0x0012}, {0x888e, 0x0007}, {0x888f, 0x0086},
3085{0x8890, 0x0001}, {0x8891, 0x00ba}, {0x8892, 0x0012},
3086{0x8893, 0x0004}, {0x8894, 0x00b7}, {0x8895, 0x0012},
3087{0x8896, 0x0004}, {0x8897, 0x0001}, {0x8898, 0x0001},
3088{0x8899, 0x0001}, {0x889a, 0x0001}, {0x889b, 0x0001},
3089{0x889c, 0x0001}, {0x889d, 0x0086}, {0x889e, 0x00fe},
3090{0x889f, 0x00b4}, {0x88a0, 0x0012}, {0x88a1, 0x0004},
3091{0x88a2, 0x00b7}, {0x88a3, 0x0012}, {0x88a4, 0x0004},
3092{0x88a5, 0x0086}, {0x88a6, 0x0002}, {0x88a7, 0x00ba},
3093{0x88a8, 0x0012}, {0x88a9, 0x0004}, {0x88aa, 0x00b7},
3094{0x88ab, 0x0012}, {0x88ac, 0x0004}, {0x88ad, 0x0086},
3095{0x88ae, 0x00fd}, {0x88af, 0x00b4}, {0x88b0, 0x0012},
3096{0x88b1, 0x0004}, {0x88b2, 0x00b7}, {0x88b3, 0x0012},
3097{0x88b4, 0x0004}, {0x88b5, 0x0032}, {0x88b6, 0x00b7},
3098{0x88b7, 0x0012}, {0x88b8, 0x0007}, {0x88b9, 0x00b6},
3099{0x88ba, 0x0012}, {0x88bb, 0x0000}, {0x88bc, 0x0084},
3100{0x88bd, 0x0008}, {0x88be, 0x0081}, {0x88bf, 0x0008},
3101{0x88c0, 0x0027}, {0x88c1, 0x000f}, {0x88c2, 0x007c},
3102{0x88c3, 0x0082}, {0x88c4, 0x0008}, {0x88c5, 0x0026},
3103{0x88c6, 0x0007}, {0x88c7, 0x0086}, {0x88c8, 0x0076},
3104{0x88c9, 0x0097}, {0x88ca, 0x0040}, {0x88cb, 0x007e},
3105{0x88cc, 0x0089}, {0x88cd, 0x006e}, {0x88ce, 0x007e},
3106{0x88cf, 0x0086}, {0x88d0, 0x00ec}, {0x88d1, 0x00b6},
3107{0x88d2, 0x008f}, {0x88d3, 0x007f}, {0x88d4, 0x0081},
3108{0x88d5, 0x000f}, {0x88d6, 0x0027}, {0x88d7, 0x003c},
3109{0x88d8, 0x00bd}, {0x88d9, 0x00e6}, {0x88da, 0x00c7},
3110{0x88db, 0x00b7}, {0x88dc, 0x0012}, {0x88dd, 0x000d},
3111{0x88de, 0x00bd}, {0x88df, 0x00e6}, {0x88e0, 0x00cb},
3112{0x88e1, 0x00b6}, {0x88e2, 0x0012}, {0x88e3, 0x0004},
3113{0x88e4, 0x008a}, {0x88e5, 0x0020}, {0x88e6, 0x00b7},
3114{0x88e7, 0x0012}, {0x88e8, 0x0004}, {0x88e9, 0x00ce},
3115{0x88ea, 0x00ff}, {0x88eb, 0x00ff}, {0x88ec, 0x00b6},
3116{0x88ed, 0x0012}, {0x88ee, 0x0000}, {0x88ef, 0x0081},
3117{0x88f0, 0x000c}, {0x88f1, 0x0026}, {0x88f2, 0x0005},
3118{0x88f3, 0x0009}, {0x88f4, 0x0026}, {0x88f5, 0x00f6},
3119{0x88f6, 0x0027}, {0x88f7, 0x001c}, {0x88f8, 0x00b6},
3120{0x88f9, 0x0012}, {0x88fa, 0x0004}, {0x88fb, 0x0084},
3121{0x88fc, 0x00df}, {0x88fd, 0x00b7}, {0x88fe, 0x0012},
3122{0x88ff, 0x0004}, {0x8900, 0x0096}, {0x8901, 0x0083},
3123{0x8902, 0x0081}, {0x8903, 0x0007}, {0x8904, 0x002c},
3124{0x8905, 0x0005}, {0x8906, 0x007c}, {0x8907, 0x0000},
3125{0x8908, 0x0083}, {0x8909, 0x0020}, {0x890a, 0x0006},
3126{0x890b, 0x0096}, {0x890c, 0x0083}, {0x890d, 0x008b},
3127{0x890e, 0x0008}, {0x890f, 0x0097}, {0x8910, 0x0083},
3128{0x8911, 0x007e}, {0x8912, 0x0085}, {0x8913, 0x0041},
3129{0x8914, 0x007f}, {0x8915, 0x008f}, {0x8916, 0x007e},
3130{0x8917, 0x0086}, {0x8918, 0x0080}, {0x8919, 0x00b7},
3131{0x891a, 0x0012}, {0x891b, 0x000c}, {0x891c, 0x0086},
3132{0x891d, 0x0001}, {0x891e, 0x00b7}, {0x891f, 0x008f},
3133{0x8920, 0x007d}, {0x8921, 0x00b6}, {0x8922, 0x0012},
3134{0x8923, 0x000c}, {0x8924, 0x0084}, {0x8925, 0x007f},
3135{0x8926, 0x00b7}, {0x8927, 0x0012}, {0x8928, 0x000c},
3136{0x8929, 0x008a}, {0x892a, 0x0080}, {0x892b, 0x00b7},
3137{0x892c, 0x0012}, {0x892d, 0x000c}, {0x892e, 0x0086},
3138{0x892f, 0x000a}, {0x8930, 0x00bd}, {0x8931, 0x008a},
3139{0x8932, 0x0006}, {0x8933, 0x00b6}, {0x8934, 0x0012},
3140{0x8935, 0x000a}, {0x8936, 0x002a}, {0x8937, 0x0009},
3141{0x8938, 0x00b6}, {0x8939, 0x0012}, {0x893a, 0x000c},
3142{0x893b, 0x00ba}, {0x893c, 0x008f}, {0x893d, 0x007d},
3143{0x893e, 0x00b7}, {0x893f, 0x0012}, {0x8940, 0x000c},
3144{0x8941, 0x00b6}, {0x8942, 0x008f}, {0x8943, 0x007e},
3145{0x8944, 0x0081}, {0x8945, 0x0060}, {0x8946, 0x0027},
3146{0x8947, 0x001a}, {0x8948, 0x008b}, {0x8949, 0x0020},
3147{0x894a, 0x00b7}, {0x894b, 0x008f}, {0x894c, 0x007e},
3148{0x894d, 0x00b6}, {0x894e, 0x0012}, {0x894f, 0x000c},
3149{0x8950, 0x0084}, {0x8951, 0x009f}, {0x8952, 0x00ba},
3150{0x8953, 0x008f}, {0x8954, 0x007e}, {0x8955, 0x00b7},
3151{0x8956, 0x0012}, {0x8957, 0x000c}, {0x8958, 0x00b6},
3152{0x8959, 0x008f}, {0x895a, 0x007d}, {0x895b, 0x0048},
3153{0x895c, 0x00b7}, {0x895d, 0x008f}, {0x895e, 0x007d},
3154{0x895f, 0x007e}, {0x8960, 0x0089}, {0x8961, 0x0021},
3155{0x8962, 0x00b6}, {0x8963, 0x0012}, {0x8964, 0x0004},
3156{0x8965, 0x008a}, {0x8966, 0x0020}, {0x8967, 0x00b7},
3157{0x8968, 0x0012}, {0x8969, 0x0004}, {0x896a, 0x00bd},
3158{0x896b, 0x008a}, {0x896c, 0x000a}, {0x896d, 0x004f},
3159{0x896e, 0x0039}, {0x896f, 0x00a6}, {0x8970, 0x0000},
3160{0x8971, 0x0018}, {0x8972, 0x00a7}, {0x8973, 0x0000},
3161{0x8974, 0x0008}, {0x8975, 0x0018}, {0x8976, 0x0008},
3162{0x8977, 0x005a}, {0x8978, 0x0026}, {0x8979, 0x00f5},
3163{0x897a, 0x0039}, {0x897b, 0x0036}, {0x897c, 0x006c},
3164{0x897d, 0x0000}, {0x897e, 0x0032}, {0x897f, 0x00ba},
3165{0x8980, 0x008f}, {0x8981, 0x007f}, {0x8982, 0x00b7},
3166{0x8983, 0x008f}, {0x8984, 0x007f}, {0x8985, 0x00b6},
3167{0x8986, 0x0012}, {0x8987, 0x0009}, {0x8988, 0x0084},
3168{0x8989, 0x0003}, {0x898a, 0x00a7}, {0x898b, 0x0001},
3169{0x898c, 0x00b6}, {0x898d, 0x0012}, {0x898e, 0x0006},
3170{0x898f, 0x0084}, {0x8990, 0x003f}, {0x8991, 0x00a7},
3171{0x8992, 0x0002}, {0x8993, 0x0039}, {0x8994, 0x0036},
3172{0x8995, 0x0086}, {0x8996, 0x0003}, {0x8997, 0x00b7},
3173{0x8998, 0x008f}, {0x8999, 0x0080}, {0x899a, 0x0032},
3174{0x899b, 0x00c1}, {0x899c, 0x0000}, {0x899d, 0x0026},
3175{0x899e, 0x0006}, {0x899f, 0x00b7}, {0x89a0, 0x008f},
3176{0x89a1, 0x007c}, {0x89a2, 0x007e}, {0x89a3, 0x0089},
3177{0x89a4, 0x00c9}, {0x89a5, 0x00c1}, {0x89a6, 0x0001},
3178{0x89a7, 0x0027}, {0x89a8, 0x0018}, {0x89a9, 0x00c1},
3179{0x89aa, 0x0002}, {0x89ab, 0x0027}, {0x89ac, 0x000c},
3180{0x89ad, 0x00c1}, {0x89ae, 0x0003}, {0x89af, 0x0027},
3181{0x89b0, 0x0000}, {0x89b1, 0x00f6}, {0x89b2, 0x008f},
3182{0x89b3, 0x0080}, {0x89b4, 0x0005}, {0x89b5, 0x0005},
3183{0x89b6, 0x00f7}, {0x89b7, 0x008f}, {0x89b8, 0x0080},
3184{0x89b9, 0x00f6}, {0x89ba, 0x008f}, {0x89bb, 0x0080},
3185{0x89bc, 0x0005}, {0x89bd, 0x0005}, {0x89be, 0x00f7},
3186{0x89bf, 0x008f}, {0x89c0, 0x0080}, {0x89c1, 0x00f6},
3187{0x89c2, 0x008f}, {0x89c3, 0x0080}, {0x89c4, 0x0005},
3188{0x89c5, 0x0005}, {0x89c6, 0x00f7}, {0x89c7, 0x008f},
3189{0x89c8, 0x0080}, {0x89c9, 0x00f6}, {0x89ca, 0x008f},
3190{0x89cb, 0x0080}, {0x89cc, 0x0053}, {0x89cd, 0x00f4},
3191{0x89ce, 0x0012}, {0x89cf, 0x0007}, {0x89d0, 0x001b},
3192{0x89d1, 0x00b7}, {0x89d2, 0x0012}, {0x89d3, 0x0007},
3193{0x89d4, 0x0039}, {0x89d5, 0x00ce}, {0x89d6, 0x008f},
3194{0x89d7, 0x0070}, {0x89d8, 0x00a6}, {0x89d9, 0x0000},
3195{0x89da, 0x0018}, {0x89db, 0x00e6}, {0x89dc, 0x0000},
3196{0x89dd, 0x0018}, {0x89de, 0x00a7}, {0x89df, 0x0000},
3197{0x89e0, 0x00e7}, {0x89e1, 0x0000}, {0x89e2, 0x00a6},
3198{0x89e3, 0x0001}, {0x89e4, 0x0018}, {0x89e5, 0x00e6},
3199{0x89e6, 0x0001}, {0x89e7, 0x0018}, {0x89e8, 0x00a7},
3200{0x89e9, 0x0001}, {0x89ea, 0x00e7}, {0x89eb, 0x0001},
3201{0x89ec, 0x00a6}, {0x89ed, 0x0002}, {0x89ee, 0x0018},
3202{0x89ef, 0x00e6}, {0x89f0, 0x0002}, {0x89f1, 0x0018},
3203{0x89f2, 0x00a7}, {0x89f3, 0x0002}, {0x89f4, 0x00e7},
3204{0x89f5, 0x0002}, {0x89f6, 0x0039}, {0x89f7, 0x00a6},
3205{0x89f8, 0x0000}, {0x89f9, 0x0084}, {0x89fa, 0x0007},
3206{0x89fb, 0x00e6}, {0x89fc, 0x0000}, {0x89fd, 0x00c4},
3207{0x89fe, 0x0038}, {0x89ff, 0x0054}, {0x8a00, 0x0054},
3208{0x8a01, 0x0054}, {0x8a02, 0x001b}, {0x8a03, 0x00a7},
3209{0x8a04, 0x0000}, {0x8a05, 0x0039}, {0x8a06, 0x004a},
3210{0x8a07, 0x0026}, {0x8a08, 0x00fd}, {0x8a09, 0x0039},
3211{0x8a0a, 0x0096}, {0x8a0b, 0x0022}, {0x8a0c, 0x0084},
3212{0x8a0d, 0x000f}, {0x8a0e, 0x0097}, {0x8a0f, 0x0022},
3213{0x8a10, 0x0086}, {0x8a11, 0x0001}, {0x8a12, 0x00b7},
3214{0x8a13, 0x008f}, {0x8a14, 0x0070}, {0x8a15, 0x00b6},
3215{0x8a16, 0x0012}, {0x8a17, 0x0007}, {0x8a18, 0x00b7},
3216{0x8a19, 0x008f}, {0x8a1a, 0x0071}, {0x8a1b, 0x00f6},
3217{0x8a1c, 0x0012}, {0x8a1d, 0x000c}, {0x8a1e, 0x00c4},
3218{0x8a1f, 0x000f}, {0x8a20, 0x00c8}, {0x8a21, 0x000f},
3219{0x8a22, 0x00f7}, {0x8a23, 0x008f}, {0x8a24, 0x0072},
3220{0x8a25, 0x00f6}, {0x8a26, 0x008f}, {0x8a27, 0x0072},
3221{0x8a28, 0x00b6}, {0x8a29, 0x008f}, {0x8a2a, 0x0071},
3222{0x8a2b, 0x0084}, {0x8a2c, 0x0003}, {0x8a2d, 0x0027},
3223{0x8a2e, 0x0014}, {0x8a2f, 0x0081}, {0x8a30, 0x0001},
3224{0x8a31, 0x0027}, {0x8a32, 0x001c}, {0x8a33, 0x0081},
3225{0x8a34, 0x0002}, {0x8a35, 0x0027}, {0x8a36, 0x0024},
3226{0x8a37, 0x00f4}, {0x8a38, 0x008f}, {0x8a39, 0x0070},
3227{0x8a3a, 0x0027}, {0x8a3b, 0x002a}, {0x8a3c, 0x0096},
3228{0x8a3d, 0x0022}, {0x8a3e, 0x008a}, {0x8a3f, 0x0080},
3229{0x8a40, 0x007e}, {0x8a41, 0x008a}, {0x8a42, 0x0064},
3230{0x8a43, 0x00f4}, {0x8a44, 0x008f}, {0x8a45, 0x0070},
3231{0x8a46, 0x0027}, {0x8a47, 0x001e}, {0x8a48, 0x0096},
3232{0x8a49, 0x0022}, {0x8a4a, 0x008a}, {0x8a4b, 0x0010},
3233{0x8a4c, 0x007e}, {0x8a4d, 0x008a}, {0x8a4e, 0x0064},
3234{0x8a4f, 0x00f4}, {0x8a50, 0x008f}, {0x8a51, 0x0070},
3235{0x8a52, 0x0027}, {0x8a53, 0x0012}, {0x8a54, 0x0096},
3236{0x8a55, 0x0022}, {0x8a56, 0x008a}, {0x8a57, 0x0020},
3237{0x8a58, 0x007e}, {0x8a59, 0x008a}, {0x8a5a, 0x0064},
3238{0x8a5b, 0x00f4}, {0x8a5c, 0x008f}, {0x8a5d, 0x0070},
3239{0x8a5e, 0x0027}, {0x8a5f, 0x0006}, {0x8a60, 0x0096},
3240{0x8a61, 0x0022}, {0x8a62, 0x008a}, {0x8a63, 0x0040},
3241{0x8a64, 0x0097}, {0x8a65, 0x0022}, {0x8a66, 0x0074},
3242{0x8a67, 0x008f}, {0x8a68, 0x0071}, {0x8a69, 0x0074},
3243{0x8a6a, 0x008f}, {0x8a6b, 0x0071}, {0x8a6c, 0x0078},
3244{0x8a6d, 0x008f}, {0x8a6e, 0x0070}, {0x8a6f, 0x00b6},
3245{0x8a70, 0x008f}, {0x8a71, 0x0070}, {0x8a72, 0x0085},
3246{0x8a73, 0x0010}, {0x8a74, 0x0027}, {0x8a75, 0x00af},
3247{0x8a76, 0x00d6}, {0x8a77, 0x0022}, {0x8a78, 0x00c4},
3248{0x8a79, 0x0010}, {0x8a7a, 0x0058}, {0x8a7b, 0x00b6},
3249{0x8a7c, 0x0012}, {0x8a7d, 0x0070}, {0x8a7e, 0x0081},
3250{0x8a7f, 0x00e4}, {0x8a80, 0x0027}, {0x8a81, 0x0036},
3251{0x8a82, 0x0081}, {0x8a83, 0x00e1}, {0x8a84, 0x0026},
3252{0x8a85, 0x000c}, {0x8a86, 0x0096}, {0x8a87, 0x0022},
3253{0x8a88, 0x0084}, {0x8a89, 0x0020}, {0x8a8a, 0x0044},
3254{0x8a8b, 0x001b}, {0x8a8c, 0x00d6}, {0x8a8d, 0x0022},
3255{0x8a8e, 0x00c4}, {0x8a8f, 0x00cf}, {0x8a90, 0x0020},
3256{0x8a91, 0x0023}, {0x8a92, 0x0058}, {0x8a93, 0x0081},
3257{0x8a94, 0x00c6}, {0x8a95, 0x0026}, {0x8a96, 0x000d},
3258{0x8a97, 0x0096}, {0x8a98, 0x0022}, {0x8a99, 0x0084},
3259{0x8a9a, 0x0040}, {0x8a9b, 0x0044}, {0x8a9c, 0x0044},
3260{0x8a9d, 0x001b}, {0x8a9e, 0x00d6}, {0x8a9f, 0x0022},
3261{0x8aa0, 0x00c4}, {0x8aa1, 0x00af}, {0x8aa2, 0x0020},
3262{0x8aa3, 0x0011}, {0x8aa4, 0x0058}, {0x8aa5, 0x0081},
3263{0x8aa6, 0x0027}, {0x8aa7, 0x0026}, {0x8aa8, 0x000f},
3264{0x8aa9, 0x0096}, {0x8aaa, 0x0022}, {0x8aab, 0x0084},
3265{0x8aac, 0x0080}, {0x8aad, 0x0044}, {0x8aae, 0x0044},
3266{0x8aaf, 0x0044}, {0x8ab0, 0x001b}, {0x8ab1, 0x00d6},
3267{0x8ab2, 0x0022}, {0x8ab3, 0x00c4}, {0x8ab4, 0x006f},
3268{0x8ab5, 0x001b}, {0x8ab6, 0x0097}, {0x8ab7, 0x0022},
3269{0x8ab8, 0x0039}, {0x8ab9, 0x0027}, {0x8aba, 0x000c},
3270{0x8abb, 0x007c}, {0x8abc, 0x0082}, {0x8abd, 0x0006},
3271{0x8abe, 0x00bd}, {0x8abf, 0x00d9}, {0x8ac0, 0x00ed},
3272{0x8ac1, 0x00b6}, {0x8ac2, 0x0082}, {0x8ac3, 0x0007},
3273{0x8ac4, 0x007e}, {0x8ac5, 0x008a}, {0x8ac6, 0x00b9},
3274{0x8ac7, 0x007f}, {0x8ac8, 0x0082}, {0x8ac9, 0x0006},
3275{0x8aca, 0x0039}, { 0x0, 0x0 }
3276};
3277#else
3278cas_saturn_patch_t cas_saturn_patch[] = {
3279{0x8200, 0x007e}, {0x8201, 0x0082}, {0x8202, 0x0009},
3280{0x8203, 0x0000}, {0x8204, 0x0000}, {0x8205, 0x0000},
3281{0x8206, 0x0000}, {0x8207, 0x0000}, {0x8208, 0x0000},
3282{0x8209, 0x008e}, {0x820a, 0x008e}, {0x820b, 0x00ff},
3283{0x820c, 0x00ce}, {0x820d, 0x0082}, {0x820e, 0x0025},
3284{0x820f, 0x00ff}, {0x8210, 0x0001}, {0x8211, 0x000f},
3285{0x8212, 0x00ce}, {0x8213, 0x0084}, {0x8214, 0x0026},
3286{0x8215, 0x00ff}, {0x8216, 0x0001}, {0x8217, 0x0011},
3287{0x8218, 0x00ce}, {0x8219, 0x0085}, {0x821a, 0x003d},
3288{0x821b, 0x00df}, {0x821c, 0x00e5}, {0x821d, 0x0086},
3289{0x821e, 0x0039}, {0x821f, 0x00b7}, {0x8220, 0x008f},
3290{0x8221, 0x00f8}, {0x8222, 0x007e}, {0x8223, 0x00c3},
3291{0x8224, 0x00c2}, {0x8225, 0x0096}, {0x8226, 0x0047},
3292{0x8227, 0x0084}, {0x8228, 0x00f3}, {0x8229, 0x008a},
3293{0x822a, 0x0000}, {0x822b, 0x0097}, {0x822c, 0x0047},
3294{0x822d, 0x00ce}, {0x822e, 0x0082}, {0x822f, 0x0033},
3295{0x8230, 0x00ff}, {0x8231, 0x0001}, {0x8232, 0x000f},
3296{0x8233, 0x0096}, {0x8234, 0x0046}, {0x8235, 0x0084},
3297{0x8236, 0x000c}, {0x8237, 0x0081}, {0x8238, 0x0004},
3298{0x8239, 0x0027}, {0x823a, 0x000b}, {0x823b, 0x0096},
3299{0x823c, 0x0046}, {0x823d, 0x0084}, {0x823e, 0x000c},
3300{0x823f, 0x0081}, {0x8240, 0x0008}, {0x8241, 0x0027},
3301{0x8242, 0x0057}, {0x8243, 0x007e}, {0x8244, 0x0084},
3302{0x8245, 0x0025}, {0x8246, 0x0096}, {0x8247, 0x0047},
3303{0x8248, 0x0084}, {0x8249, 0x00f3}, {0x824a, 0x008a},
3304{0x824b, 0x0004}, {0x824c, 0x0097}, {0x824d, 0x0047},
3305{0x824e, 0x00ce}, {0x824f, 0x0082}, {0x8250, 0x0054},
3306{0x8251, 0x00ff}, {0x8252, 0x0001}, {0x8253, 0x000f},
3307{0x8254, 0x0096}, {0x8255, 0x0046}, {0x8256, 0x0084},
3308{0x8257, 0x000c}, {0x8258, 0x0081}, {0x8259, 0x0004},
3309{0x825a, 0x0026}, {0x825b, 0x0038}, {0x825c, 0x00b6},
3310{0x825d, 0x0012}, {0x825e, 0x0020}, {0x825f, 0x0084},
3311{0x8260, 0x0020}, {0x8261, 0x0026}, {0x8262, 0x0003},
3312{0x8263, 0x007e}, {0x8264, 0x0084}, {0x8265, 0x0025},
3313{0x8266, 0x0096}, {0x8267, 0x007b}, {0x8268, 0x00d6},
3314{0x8269, 0x007c}, {0x826a, 0x00fe}, {0x826b, 0x008f},
3315{0x826c, 0x0056}, {0x826d, 0x00bd}, {0x826e, 0x00f7},
3316{0x826f, 0x00b6}, {0x8270, 0x00fe}, {0x8271, 0x008f},
3317{0x8272, 0x004e}, {0x8273, 0x00bd}, {0x8274, 0x00ec},
3318{0x8275, 0x008e}, {0x8276, 0x00bd}, {0x8277, 0x00fa},
3319{0x8278, 0x00f7}, {0x8279, 0x00bd}, {0x827a, 0x00f7},
3320{0x827b, 0x0028}, {0x827c, 0x00ce}, {0x827d, 0x0082},
3321{0x827e, 0x0082}, {0x827f, 0x00ff}, {0x8280, 0x0001},
3322{0x8281, 0x000f}, {0x8282, 0x0096}, {0x8283, 0x0046},
3323{0x8284, 0x0084}, {0x8285, 0x000c}, {0x8286, 0x0081},
3324{0x8287, 0x0004}, {0x8288, 0x0026}, {0x8289, 0x000a},
3325{0x828a, 0x00b6}, {0x828b, 0x0012}, {0x828c, 0x0020},
3326{0x828d, 0x0084}, {0x828e, 0x0020}, {0x828f, 0x0027},
3327{0x8290, 0x00b5}, {0x8291, 0x007e}, {0x8292, 0x0084},
3328{0x8293, 0x0025}, {0x8294, 0x00bd}, {0x8295, 0x00f7},
3329{0x8296, 0x001f}, {0x8297, 0x007e}, {0x8298, 0x0084},
3330{0x8299, 0x001f}, {0x829a, 0x0096}, {0x829b, 0x0047},
3331{0x829c, 0x0084}, {0x829d, 0x00f3}, {0x829e, 0x008a},
3332{0x829f, 0x0008}, {0x82a0, 0x0097}, {0x82a1, 0x0047},
3333{0x82a2, 0x00de}, {0x82a3, 0x00e1}, {0x82a4, 0x00ad},
3334{0x82a5, 0x0000}, {0x82a6, 0x00ce}, {0x82a7, 0x0082},
3335{0x82a8, 0x00af}, {0x82a9, 0x00ff}, {0x82aa, 0x0001},
3336{0x82ab, 0x000f}, {0x82ac, 0x007e}, {0x82ad, 0x0084},
3337{0x82ae, 0x0025}, {0x82af, 0x0096}, {0x82b0, 0x0041},
3338{0x82b1, 0x0085}, {0x82b2, 0x0010}, {0x82b3, 0x0026},
3339{0x82b4, 0x0006}, {0x82b5, 0x0096}, {0x82b6, 0x0023},
3340{0x82b7, 0x0085}, {0x82b8, 0x0040}, {0x82b9, 0x0027},
3341{0x82ba, 0x0006}, {0x82bb, 0x00bd}, {0x82bc, 0x00ed},
3342{0x82bd, 0x0000}, {0x82be, 0x007e}, {0x82bf, 0x0083},
3343{0x82c0, 0x00a2}, {0x82c1, 0x00de}, {0x82c2, 0x0042},
3344{0x82c3, 0x00bd}, {0x82c4, 0x00eb}, {0x82c5, 0x008e},
3345{0x82c6, 0x0096}, {0x82c7, 0x0024}, {0x82c8, 0x0084},
3346{0x82c9, 0x0008}, {0x82ca, 0x0027}, {0x82cb, 0x0003},
3347{0x82cc, 0x007e}, {0x82cd, 0x0083}, {0x82ce, 0x00df},
3348{0x82cf, 0x0096}, {0x82d0, 0x007b}, {0x82d1, 0x00d6},
3349{0x82d2, 0x007c}, {0x82d3, 0x00fe}, {0x82d4, 0x008f},
3350{0x82d5, 0x0056}, {0x82d6, 0x00bd}, {0x82d7, 0x00f7},
3351{0x82d8, 0x00b6}, {0x82d9, 0x00fe}, {0x82da, 0x008f},
3352{0x82db, 0x0050}, {0x82dc, 0x00bd}, {0x82dd, 0x00ec},
3353{0x82de, 0x008e}, {0x82df, 0x00bd}, {0x82e0, 0x00fa},
3354{0x82e1, 0x00f7}, {0x82e2, 0x0086}, {0x82e3, 0x0011},
3355{0x82e4, 0x00c6}, {0x82e5, 0x0049}, {0x82e6, 0x00bd},
3356{0x82e7, 0x00e4}, {0x82e8, 0x0012}, {0x82e9, 0x00ce},
3357{0x82ea, 0x0082}, {0x82eb, 0x00ef}, {0x82ec, 0x00ff},
3358{0x82ed, 0x0001}, {0x82ee, 0x000f}, {0x82ef, 0x0096},
3359{0x82f0, 0x0046}, {0x82f1, 0x0084}, {0x82f2, 0x000c},
3360{0x82f3, 0x0081}, {0x82f4, 0x0000}, {0x82f5, 0x0027},
3361{0x82f6, 0x0017}, {0x82f7, 0x00c6}, {0x82f8, 0x0049},
3362{0x82f9, 0x00bd}, {0x82fa, 0x00e4}, {0x82fb, 0x0091},
3363{0x82fc, 0x0024}, {0x82fd, 0x000d}, {0x82fe, 0x00b6},
3364{0x82ff, 0x0012}, {0x8300, 0x0020}, {0x8301, 0x0085},
3365{0x8302, 0x0020}, {0x8303, 0x0026}, {0x8304, 0x000c},
3366{0x8305, 0x00ce}, {0x8306, 0x0082}, {0x8307, 0x00c1},
3367{0x8308, 0x00ff}, {0x8309, 0x0001}, {0x830a, 0x000f},
3368{0x830b, 0x007e}, {0x830c, 0x0084}, {0x830d, 0x0025},
3369{0x830e, 0x007e}, {0x830f, 0x0084}, {0x8310, 0x0016},
3370{0x8311, 0x00fe}, {0x8312, 0x008f}, {0x8313, 0x0052},
3371{0x8314, 0x00bd}, {0x8315, 0x00ec}, {0x8316, 0x008e},
3372{0x8317, 0x00bd}, {0x8318, 0x00fa}, {0x8319, 0x00f7},
3373{0x831a, 0x0086}, {0x831b, 0x006a}, {0x831c, 0x00c6},
3374{0x831d, 0x0049}, {0x831e, 0x00bd}, {0x831f, 0x00e4},
3375{0x8320, 0x0012}, {0x8321, 0x00ce}, {0x8322, 0x0083},
3376{0x8323, 0x0027}, {0x8324, 0x00ff}, {0x8325, 0x0001},
3377{0x8326, 0x000f}, {0x8327, 0x0096}, {0x8328, 0x0046},
3378{0x8329, 0x0084}, {0x832a, 0x000c}, {0x832b, 0x0081},
3379{0x832c, 0x0000}, {0x832d, 0x0027}, {0x832e, 0x000a},
3380{0x832f, 0x00c6}, {0x8330, 0x0049}, {0x8331, 0x00bd},
3381{0x8332, 0x00e4}, {0x8333, 0x0091}, {0x8334, 0x0025},
3382{0x8335, 0x0006}, {0x8336, 0x007e}, {0x8337, 0x0084},
3383{0x8338, 0x0025}, {0x8339, 0x007e}, {0x833a, 0x0084},
3384{0x833b, 0x0016}, {0x833c, 0x00b6}, {0x833d, 0x0018},
3385{0x833e, 0x0070}, {0x833f, 0x00bb}, {0x8340, 0x0019},
3386{0x8341, 0x0070}, {0x8342, 0x002a}, {0x8343, 0x0004},
3387{0x8344, 0x0081}, {0x8345, 0x00af}, {0x8346, 0x002e},
3388{0x8347, 0x0019}, {0x8348, 0x0096}, {0x8349, 0x007b},
3389{0x834a, 0x00f6}, {0x834b, 0x0020}, {0x834c, 0x0007},
3390{0x834d, 0x00fa}, {0x834e, 0x0020}, {0x834f, 0x0027},
3391{0x8350, 0x00c4}, {0x8351, 0x0038}, {0x8352, 0x0081},
3392{0x8353, 0x0038}, {0x8354, 0x0027}, {0x8355, 0x000b},
3393{0x8356, 0x00f6}, {0x8357, 0x0020}, {0x8358, 0x0007},
3394{0x8359, 0x00fa}, {0x835a, 0x0020}, {0x835b, 0x0027},
3395{0x835c, 0x00cb}, {0x835d, 0x0008}, {0x835e, 0x007e},
3396{0x835f, 0x0082}, {0x8360, 0x00d3}, {0x8361, 0x00bd},
3397{0x8362, 0x00f7}, {0x8363, 0x0066}, {0x8364, 0x0086},
3398{0x8365, 0x0074}, {0x8366, 0x00c6}, {0x8367, 0x0049},
3399{0x8368, 0x00bd}, {0x8369, 0x00e4}, {0x836a, 0x0012},
3400{0x836b, 0x00ce}, {0x836c, 0x0083}, {0x836d, 0x0071},
3401{0x836e, 0x00ff}, {0x836f, 0x0001}, {0x8370, 0x000f},
3402{0x8371, 0x0096}, {0x8372, 0x0046}, {0x8373, 0x0084},
3403{0x8374, 0x000c}, {0x8375, 0x0081}, {0x8376, 0x0008},
3404{0x8377, 0x0026}, {0x8378, 0x000a}, {0x8379, 0x00c6},
3405{0x837a, 0x0049}, {0x837b, 0x00bd}, {0x837c, 0x00e4},
3406{0x837d, 0x0091}, {0x837e, 0x0025}, {0x837f, 0x0006},
3407{0x8380, 0x007e}, {0x8381, 0x0084}, {0x8382, 0x0025},
3408{0x8383, 0x007e}, {0x8384, 0x0084}, {0x8385, 0x0016},
3409{0x8386, 0x00bd}, {0x8387, 0x00f7}, {0x8388, 0x003e},
3410{0x8389, 0x0026}, {0x838a, 0x000e}, {0x838b, 0x00bd},
3411{0x838c, 0x00e5}, {0x838d, 0x0009}, {0x838e, 0x0026},
3412{0x838f, 0x0006}, {0x8390, 0x00ce}, {0x8391, 0x0082},
3413{0x8392, 0x00c1}, {0x8393, 0x00ff}, {0x8394, 0x0001},
3414{0x8395, 0x000f}, {0x8396, 0x007e}, {0x8397, 0x0084},
3415{0x8398, 0x0025}, {0x8399, 0x00fe}, {0x839a, 0x008f},
3416{0x839b, 0x0054}, {0x839c, 0x00bd}, {0x839d, 0x00ec},
3417{0x839e, 0x008e}, {0x839f, 0x00bd}, {0x83a0, 0x00fa},
3418{0x83a1, 0x00f7}, {0x83a2, 0x00bd}, {0x83a3, 0x00f7},
3419{0x83a4, 0x0033}, {0x83a5, 0x0086}, {0x83a6, 0x000f},
3420{0x83a7, 0x00c6}, {0x83a8, 0x0051}, {0x83a9, 0x00bd},
3421{0x83aa, 0x00e4}, {0x83ab, 0x0012}, {0x83ac, 0x00ce},
3422{0x83ad, 0x0083}, {0x83ae, 0x00b2}, {0x83af, 0x00ff},
3423{0x83b0, 0x0001}, {0x83b1, 0x000f}, {0x83b2, 0x0096},
3424{0x83b3, 0x0046}, {0x83b4, 0x0084}, {0x83b5, 0x000c},
3425{0x83b6, 0x0081}, {0x83b7, 0x0008}, {0x83b8, 0x0026},
3426{0x83b9, 0x005c}, {0x83ba, 0x00b6}, {0x83bb, 0x0012},
3427{0x83bc, 0x0020}, {0x83bd, 0x0084}, {0x83be, 0x003f},
3428{0x83bf, 0x0081}, {0x83c0, 0x003a}, {0x83c1, 0x0027},
3429{0x83c2, 0x001c}, {0x83c3, 0x0096}, {0x83c4, 0x0023},
3430{0x83c5, 0x0085}, {0x83c6, 0x0040}, {0x83c7, 0x0027},
3431{0x83c8, 0x0003}, {0x83c9, 0x007e}, {0x83ca, 0x0084},
3432{0x83cb, 0x0025}, {0x83cc, 0x00c6}, {0x83cd, 0x0051},
3433{0x83ce, 0x00bd}, {0x83cf, 0x00e4}, {0x83d0, 0x0091},
3434{0x83d1, 0x0025}, {0x83d2, 0x0003}, {0x83d3, 0x007e},
3435{0x83d4, 0x0084}, {0x83d5, 0x0025}, {0x83d6, 0x00ce},
3436{0x83d7, 0x0082}, {0x83d8, 0x00c1}, {0x83d9, 0x00ff},
3437{0x83da, 0x0001}, {0x83db, 0x000f}, {0x83dc, 0x007e},
3438{0x83dd, 0x0084}, {0x83de, 0x0025}, {0x83df, 0x00bd},
3439{0x83e0, 0x00f8}, {0x83e1, 0x0037}, {0x83e2, 0x007c},
3440{0x83e3, 0x0000}, {0x83e4, 0x007a}, {0x83e5, 0x00ce},
3441{0x83e6, 0x0083}, {0x83e7, 0x00ee}, {0x83e8, 0x00ff},
3442{0x83e9, 0x0001}, {0x83ea, 0x000f}, {0x83eb, 0x007e},
3443{0x83ec, 0x0084}, {0x83ed, 0x0025}, {0x83ee, 0x0096},
3444{0x83ef, 0x0046}, {0x83f0, 0x0084}, {0x83f1, 0x000c},
3445{0x83f2, 0x0081}, {0x83f3, 0x0008}, {0x83f4, 0x0026},
3446{0x83f5, 0x0020}, {0x83f6, 0x0096}, {0x83f7, 0x0024},
3447{0x83f8, 0x0084}, {0x83f9, 0x0008}, {0x83fa, 0x0026},
3448{0x83fb, 0x0029}, {0x83fc, 0x00b6}, {0x83fd, 0x0018},
3449{0x83fe, 0x0082}, {0x83ff, 0x00bb}, {0x8400, 0x0019},
3450{0x8401, 0x0082}, {0x8402, 0x00b1}, {0x8403, 0x0001},
3451{0x8404, 0x003b}, {0x8405, 0x0022}, {0x8406, 0x0009},
3452{0x8407, 0x00b6}, {0x8408, 0x0012}, {0x8409, 0x0020},
3453{0x840a, 0x0084}, {0x840b, 0x0037}, {0x840c, 0x0081},
3454{0x840d, 0x0032}, {0x840e, 0x0027}, {0x840f, 0x0015},
3455{0x8410, 0x00bd}, {0x8411, 0x00f8}, {0x8412, 0x0044},
3456{0x8413, 0x007e}, {0x8414, 0x0082}, {0x8415, 0x00c1},
3457{0x8416, 0x00bd}, {0x8417, 0x00f7}, {0x8418, 0x001f},
3458{0x8419, 0x00bd}, {0x841a, 0x00f8}, {0x841b, 0x0044},
3459{0x841c, 0x00bd}, {0x841d, 0x00fc}, {0x841e, 0x0029},
3460{0x841f, 0x00ce}, {0x8420, 0x0082}, {0x8421, 0x0025},
3461{0x8422, 0x00ff}, {0x8423, 0x0001}, {0x8424, 0x000f},
3462{0x8425, 0x0039}, {0x8426, 0x0096}, {0x8427, 0x0047},
3463{0x8428, 0x0084}, {0x8429, 0x00fc}, {0x842a, 0x008a},
3464{0x842b, 0x0000}, {0x842c, 0x0097}, {0x842d, 0x0047},
3465{0x842e, 0x00ce}, {0x842f, 0x0084}, {0x8430, 0x0034},
3466{0x8431, 0x00ff}, {0x8432, 0x0001}, {0x8433, 0x0011},
3467{0x8434, 0x0096}, {0x8435, 0x0046}, {0x8436, 0x0084},
3468{0x8437, 0x0003}, {0x8438, 0x0081}, {0x8439, 0x0002},
3469{0x843a, 0x0027}, {0x843b, 0x0003}, {0x843c, 0x007e},
3470{0x843d, 0x0085}, {0x843e, 0x001e}, {0x843f, 0x0096},
3471{0x8440, 0x0047}, {0x8441, 0x0084}, {0x8442, 0x00fc},
3472{0x8443, 0x008a}, {0x8444, 0x0002}, {0x8445, 0x0097},
3473{0x8446, 0x0047}, {0x8447, 0x00de}, {0x8448, 0x00e1},
3474{0x8449, 0x00ad}, {0x844a, 0x0000}, {0x844b, 0x0086},
3475{0x844c, 0x0001}, {0x844d, 0x00b7}, {0x844e, 0x0012},
3476{0x844f, 0x0051}, {0x8450, 0x00bd}, {0x8451, 0x00f7},
3477{0x8452, 0x0014}, {0x8453, 0x00b6}, {0x8454, 0x0010},
3478{0x8455, 0x0031}, {0x8456, 0x0084}, {0x8457, 0x00fd},
3479{0x8458, 0x00b7}, {0x8459, 0x0010}, {0x845a, 0x0031},
3480{0x845b, 0x00bd}, {0x845c, 0x00f8}, {0x845d, 0x001e},
3481{0x845e, 0x0096}, {0x845f, 0x0081}, {0x8460, 0x00d6},
3482{0x8461, 0x0082}, {0x8462, 0x00fe}, {0x8463, 0x008f},
3483{0x8464, 0x005a}, {0x8465, 0x00bd}, {0x8466, 0x00f7},
3484{0x8467, 0x00b6}, {0x8468, 0x00fe}, {0x8469, 0x008f},
3485{0x846a, 0x005c}, {0x846b, 0x00bd}, {0x846c, 0x00ec},
3486{0x846d, 0x008e}, {0x846e, 0x00bd}, {0x846f, 0x00fa},
3487{0x8470, 0x00f7}, {0x8471, 0x0086}, {0x8472, 0x0008},
3488{0x8473, 0x00d6}, {0x8474, 0x0000}, {0x8475, 0x00c5},
3489{0x8476, 0x0010}, {0x8477, 0x0026}, {0x8478, 0x0002},
3490{0x8479, 0x008b}, {0x847a, 0x0020}, {0x847b, 0x00c6},
3491{0x847c, 0x0051}, {0x847d, 0x00bd}, {0x847e, 0x00e4},
3492{0x847f, 0x0012}, {0x8480, 0x00ce}, {0x8481, 0x0084},
3493{0x8482, 0x0086}, {0x8483, 0x00ff}, {0x8484, 0x0001},
3494{0x8485, 0x0011}, {0x8486, 0x0096}, {0x8487, 0x0046},
3495{0x8488, 0x0084}, {0x8489, 0x0003}, {0x848a, 0x0081},
3496{0x848b, 0x0002}, {0x848c, 0x0027}, {0x848d, 0x0003},
3497{0x848e, 0x007e}, {0x848f, 0x0085}, {0x8490, 0x000f},
3498{0x8491, 0x00c6}, {0x8492, 0x0051}, {0x8493, 0x00bd},
3499{0x8494, 0x00e4}, {0x8495, 0x0091}, {0x8496, 0x0025},
3500{0x8497, 0x0003}, {0x8498, 0x007e}, {0x8499, 0x0085},
3501{0x849a, 0x001e}, {0x849b, 0x0096}, {0x849c, 0x0044},
3502{0x849d, 0x0085}, {0x849e, 0x0010}, {0x849f, 0x0026},
3503{0x84a0, 0x000a}, {0x84a1, 0x00b6}, {0x84a2, 0x0012},
3504{0x84a3, 0x0050}, {0x84a4, 0x00ba}, {0x84a5, 0x0001},
3505{0x84a6, 0x003c}, {0x84a7, 0x0085}, {0x84a8, 0x0010},
3506{0x84a9, 0x0027}, {0x84aa, 0x00a8}, {0x84ab, 0x00bd},
3507{0x84ac, 0x00f7}, {0x84ad, 0x0066}, {0x84ae, 0x00ce},
3508{0x84af, 0x0084}, {0x84b0, 0x00b7}, {0x84b1, 0x00ff},
3509{0x84b2, 0x0001}, {0x84b3, 0x0011}, {0x84b4, 0x007e},
3510{0x84b5, 0x0085}, {0x84b6, 0x001e}, {0x84b7, 0x0096},
3511{0x84b8, 0x0046}, {0x84b9, 0x0084}, {0x84ba, 0x0003},
3512{0x84bb, 0x0081}, {0x84bc, 0x0002}, {0x84bd, 0x0026},
3513{0x84be, 0x0050}, {0x84bf, 0x00b6}, {0x84c0, 0x0012},
3514{0x84c1, 0x0030}, {0x84c2, 0x0084}, {0x84c3, 0x0003},
3515{0x84c4, 0x0081}, {0x84c5, 0x0001}, {0x84c6, 0x0027},
3516{0x84c7, 0x0003}, {0x84c8, 0x007e}, {0x84c9, 0x0085},
3517{0x84ca, 0x001e}, {0x84cb, 0x0096}, {0x84cc, 0x0044},
3518{0x84cd, 0x0085}, {0x84ce, 0x0010}, {0x84cf, 0x0026},
3519{0x84d0, 0x0013}, {0x84d1, 0x00b6}, {0x84d2, 0x0012},
3520{0x84d3, 0x0050}, {0x84d4, 0x00ba}, {0x84d5, 0x0001},
3521{0x84d6, 0x003c}, {0x84d7, 0x0085}, {0x84d8, 0x0010},
3522{0x84d9, 0x0026}, {0x84da, 0x0009}, {0x84db, 0x00ce},
3523{0x84dc, 0x0084}, {0x84dd, 0x0053}, {0x84de, 0x00ff},
3524{0x84df, 0x0001}, {0x84e0, 0x0011}, {0x84e1, 0x007e},
3525{0x84e2, 0x0085}, {0x84e3, 0x001e}, {0x84e4, 0x00b6},
3526{0x84e5, 0x0010}, {0x84e6, 0x0031}, {0x84e7, 0x008a},
3527{0x84e8, 0x0002}, {0x84e9, 0x00b7}, {0x84ea, 0x0010},
3528{0x84eb, 0x0031}, {0x84ec, 0x00bd}, {0x84ed, 0x0085},
3529{0x84ee, 0x001f}, {0x84ef, 0x00bd}, {0x84f0, 0x00f8},
3530{0x84f1, 0x0037}, {0x84f2, 0x007c}, {0x84f3, 0x0000},
3531{0x84f4, 0x0080}, {0x84f5, 0x00ce}, {0x84f6, 0x0084},
3532{0x84f7, 0x00fe}, {0x84f8, 0x00ff}, {0x84f9, 0x0001},
3533{0x84fa, 0x0011}, {0x84fb, 0x007e}, {0x84fc, 0x0085},
3534{0x84fd, 0x001e}, {0x84fe, 0x0096}, {0x84ff, 0x0046},
3535{0x8500, 0x0084}, {0x8501, 0x0003}, {0x8502, 0x0081},
3536{0x8503, 0x0002}, {0x8504, 0x0026}, {0x8505, 0x0009},
3537{0x8506, 0x00b6}, {0x8507, 0x0012}, {0x8508, 0x0030},
3538{0x8509, 0x0084}, {0x850a, 0x0003}, {0x850b, 0x0081},
3539{0x850c, 0x0001}, {0x850d, 0x0027}, {0x850e, 0x000f},
3540{0x850f, 0x00bd}, {0x8510, 0x00f8}, {0x8511, 0x0044},
3541{0x8512, 0x00bd}, {0x8513, 0x00f7}, {0x8514, 0x000b},
3542{0x8515, 0x00bd}, {0x8516, 0x00fc}, {0x8517, 0x0029},
3543{0x8518, 0x00ce}, {0x8519, 0x0084}, {0x851a, 0x0026},
3544{0x851b, 0x00ff}, {0x851c, 0x0001}, {0x851d, 0x0011},
3545{0x851e, 0x0039}, {0x851f, 0x00d6}, {0x8520, 0x0022},
3546{0x8521, 0x00c4}, {0x8522, 0x000f}, {0x8523, 0x00b6},
3547{0x8524, 0x0012}, {0x8525, 0x0030}, {0x8526, 0x00ba},
3548{0x8527, 0x0012}, {0x8528, 0x0032}, {0x8529, 0x0084},
3549{0x852a, 0x0004}, {0x852b, 0x0027}, {0x852c, 0x000d},
3550{0x852d, 0x0096}, {0x852e, 0x0022}, {0x852f, 0x0085},
3551{0x8530, 0x0004}, {0x8531, 0x0027}, {0x8532, 0x0005},
3552{0x8533, 0x00ca}, {0x8534, 0x0010}, {0x8535, 0x007e},
3553{0x8536, 0x0085}, {0x8537, 0x003a}, {0x8538, 0x00ca},
3554{0x8539, 0x0020}, {0x853a, 0x00d7}, {0x853b, 0x0022},
3555{0x853c, 0x0039}, {0x853d, 0x0086}, {0x853e, 0x0000},
3556{0x853f, 0x0097}, {0x8540, 0x0083}, {0x8541, 0x0018},
3557{0x8542, 0x00ce}, {0x8543, 0x001c}, {0x8544, 0x0000},
3558{0x8545, 0x00bd}, {0x8546, 0x00eb}, {0x8547, 0x0046},
3559{0x8548, 0x0096}, {0x8549, 0x0057}, {0x854a, 0x0085},
3560{0x854b, 0x0001}, {0x854c, 0x0027}, {0x854d, 0x0002},
3561{0x854e, 0x004f}, {0x854f, 0x0039}, {0x8550, 0x0085},
3562{0x8551, 0x0002}, {0x8552, 0x0027}, {0x8553, 0x0001},
3563{0x8554, 0x0039}, {0x8555, 0x007f}, {0x8556, 0x008f},
3564{0x8557, 0x007d}, {0x8558, 0x0086}, {0x8559, 0x0004},
3565{0x855a, 0x00b7}, {0x855b, 0x0012}, {0x855c, 0x0004},
3566{0x855d, 0x0086}, {0x855e, 0x0008}, {0x855f, 0x00b7},
3567{0x8560, 0x0012}, {0x8561, 0x0007}, {0x8562, 0x0086},
3568{0x8563, 0x0010}, {0x8564, 0x00b7}, {0x8565, 0x0012},
3569{0x8566, 0x000c}, {0x8567, 0x0086}, {0x8568, 0x0007},
3570{0x8569, 0x00b7}, {0x856a, 0x0012}, {0x856b, 0x0006},
3571{0x856c, 0x00b6}, {0x856d, 0x008f}, {0x856e, 0x007d},
3572{0x856f, 0x00b7}, {0x8570, 0x0012}, {0x8571, 0x0070},
3573{0x8572, 0x0086}, {0x8573, 0x0001}, {0x8574, 0x00ba},
3574{0x8575, 0x0012}, {0x8576, 0x0004}, {0x8577, 0x00b7},
3575{0x8578, 0x0012}, {0x8579, 0x0004}, {0x857a, 0x0001},
3576{0x857b, 0x0001}, {0x857c, 0x0001}, {0x857d, 0x0001},
3577{0x857e, 0x0001}, {0x857f, 0x0001}, {0x8580, 0x00b6},
3578{0x8581, 0x0012}, {0x8582, 0x0004}, {0x8583, 0x0084},
3579{0x8584, 0x00fe}, {0x8585, 0x008a}, {0x8586, 0x0002},
3580{0x8587, 0x00b7}, {0x8588, 0x0012}, {0x8589, 0x0004},
3581{0x858a, 0x0001}, {0x858b, 0x0001}, {0x858c, 0x0001},
3582{0x858d, 0x0001}, {0x858e, 0x0001}, {0x858f, 0x0001},
3583{0x8590, 0x0086}, {0x8591, 0x00fd}, {0x8592, 0x00b4},
3584{0x8593, 0x0012}, {0x8594, 0x0004}, {0x8595, 0x00b7},
3585{0x8596, 0x0012}, {0x8597, 0x0004}, {0x8598, 0x00b6},
3586{0x8599, 0x0012}, {0x859a, 0x0000}, {0x859b, 0x0084},
3587{0x859c, 0x0008}, {0x859d, 0x0081}, {0x859e, 0x0008},
3588{0x859f, 0x0027}, {0x85a0, 0x0016}, {0x85a1, 0x00b6},
3589{0x85a2, 0x008f}, {0x85a3, 0x007d}, {0x85a4, 0x0081},
3590{0x85a5, 0x000c}, {0x85a6, 0x0027}, {0x85a7, 0x0008},
3591{0x85a8, 0x008b}, {0x85a9, 0x0004}, {0x85aa, 0x00b7},
3592{0x85ab, 0x008f}, {0x85ac, 0x007d}, {0x85ad, 0x007e},
3593{0x85ae, 0x0085}, {0x85af, 0x006c}, {0x85b0, 0x0086},
3594{0x85b1, 0x0003}, {0x85b2, 0x0097}, {0x85b3, 0x0040},
3595{0x85b4, 0x007e}, {0x85b5, 0x0089}, {0x85b6, 0x006e},
3596{0x85b7, 0x0086}, {0x85b8, 0x0007}, {0x85b9, 0x00b7},
3597{0x85ba, 0x0012}, {0x85bb, 0x0006}, {0x85bc, 0x005f},
3598{0x85bd, 0x00f7}, {0x85be, 0x008f}, {0x85bf, 0x0082},
3599{0x85c0, 0x005f}, {0x85c1, 0x00f7}, {0x85c2, 0x008f},
3600{0x85c3, 0x007f}, {0x85c4, 0x00f7}, {0x85c5, 0x008f},
3601{0x85c6, 0x0070}, {0x85c7, 0x00f7}, {0x85c8, 0x008f},
3602{0x85c9, 0x0071}, {0x85ca, 0x00f7}, {0x85cb, 0x008f},
3603{0x85cc, 0x0072}, {0x85cd, 0x00f7}, {0x85ce, 0x008f},
3604{0x85cf, 0x0073}, {0x85d0, 0x00f7}, {0x85d1, 0x008f},
3605{0x85d2, 0x0074}, {0x85d3, 0x00f7}, {0x85d4, 0x008f},
3606{0x85d5, 0x0075}, {0x85d6, 0x00f7}, {0x85d7, 0x008f},
3607{0x85d8, 0x0076}, {0x85d9, 0x00f7}, {0x85da, 0x008f},
3608{0x85db, 0x0077}, {0x85dc, 0x00f7}, {0x85dd, 0x008f},
3609{0x85de, 0x0078}, {0x85df, 0x00f7}, {0x85e0, 0x008f},
3610{0x85e1, 0x0079}, {0x85e2, 0x00f7}, {0x85e3, 0x008f},
3611{0x85e4, 0x007a}, {0x85e5, 0x00f7}, {0x85e6, 0x008f},
3612{0x85e7, 0x007b}, {0x85e8, 0x00b6}, {0x85e9, 0x0012},
3613{0x85ea, 0x0004}, {0x85eb, 0x008a}, {0x85ec, 0x0010},
3614{0x85ed, 0x00b7}, {0x85ee, 0x0012}, {0x85ef, 0x0004},
3615{0x85f0, 0x0086}, {0x85f1, 0x00e4}, {0x85f2, 0x00b7},
3616{0x85f3, 0x0012}, {0x85f4, 0x0070}, {0x85f5, 0x00b7},
3617{0x85f6, 0x0012}, {0x85f7, 0x0007}, {0x85f8, 0x00f7},
3618{0x85f9, 0x0012}, {0x85fa, 0x0005}, {0x85fb, 0x00f7},
3619{0x85fc, 0x0012}, {0x85fd, 0x0009}, {0x85fe, 0x0086},
3620{0x85ff, 0x0008}, {0x8600, 0x00ba}, {0x8601, 0x0012},
3621{0x8602, 0x0004}, {0x8603, 0x00b7}, {0x8604, 0x0012},
3622{0x8605, 0x0004}, {0x8606, 0x0086}, {0x8607, 0x00f7},
3623{0x8608, 0x00b4}, {0x8609, 0x0012}, {0x860a, 0x0004},
3624{0x860b, 0x00b7}, {0x860c, 0x0012}, {0x860d, 0x0004},
3625{0x860e, 0x0001}, {0x860f, 0x0001}, {0x8610, 0x0001},
3626{0x8611, 0x0001}, {0x8612, 0x0001}, {0x8613, 0x0001},
3627{0x8614, 0x00b6}, {0x8615, 0x0012}, {0x8616, 0x0008},
3628{0x8617, 0x0027}, {0x8618, 0x007f}, {0x8619, 0x0081},
3629{0x861a, 0x0080}, {0x861b, 0x0026}, {0x861c, 0x000b},
3630{0x861d, 0x0086}, {0x861e, 0x0008}, {0x861f, 0x00ce},
3631{0x8620, 0x008f}, {0x8621, 0x0079}, {0x8622, 0x00bd},
3632{0x8623, 0x0089}, {0x8624, 0x007b}, {0x8625, 0x007e},
3633{0x8626, 0x0086}, {0x8627, 0x008e}, {0x8628, 0x0081},
3634{0x8629, 0x0040}, {0x862a, 0x0026}, {0x862b, 0x000b},
3635{0x862c, 0x0086}, {0x862d, 0x0004}, {0x862e, 0x00ce},
3636{0x862f, 0x008f}, {0x8630, 0x0076}, {0x8631, 0x00bd},
3637{0x8632, 0x0089}, {0x8633, 0x007b}, {0x8634, 0x007e},
3638{0x8635, 0x0086}, {0x8636, 0x008e}, {0x8637, 0x0081},
3639{0x8638, 0x0020}, {0x8639, 0x0026}, {0x863a, 0x000b},
3640{0x863b, 0x0086}, {0x863c, 0x0002}, {0x863d, 0x00ce},
3641{0x863e, 0x008f}, {0x863f, 0x0073}, {0x8640, 0x00bd},
3642{0x8641, 0x0089}, {0x8642, 0x007b}, {0x8643, 0x007e},
3643{0x8644, 0x0086}, {0x8645, 0x008e}, {0x8646, 0x0081},
3644{0x8647, 0x0010}, {0x8648, 0x0026}, {0x8649, 0x000b},
3645{0x864a, 0x0086}, {0x864b, 0x0001}, {0x864c, 0x00ce},
3646{0x864d, 0x008f}, {0x864e, 0x0070}, {0x864f, 0x00bd},
3647{0x8650, 0x0089}, {0x8651, 0x007b}, {0x8652, 0x007e},
3648{0x8653, 0x0086}, {0x8654, 0x008e}, {0x8655, 0x0081},
3649{0x8656, 0x0008}, {0x8657, 0x0026}, {0x8658, 0x000b},
3650{0x8659, 0x0086}, {0x865a, 0x0008}, {0x865b, 0x00ce},
3651{0x865c, 0x008f}, {0x865d, 0x0079}, {0x865e, 0x00bd},
3652{0x865f, 0x0089}, {0x8660, 0x007f}, {0x8661, 0x007e},
3653{0x8662, 0x0086}, {0x8663, 0x008e}, {0x8664, 0x0081},
3654{0x8665, 0x0004}, {0x8666, 0x0026}, {0x8667, 0x000b},
3655{0x8668, 0x0086}, {0x8669, 0x0004}, {0x866a, 0x00ce},
3656{0x866b, 0x008f}, {0x866c, 0x0076}, {0x866d, 0x00bd},
3657{0x866e, 0x0089}, {0x866f, 0x007f}, {0x8670, 0x007e},
3658{0x8671, 0x0086}, {0x8672, 0x008e}, {0x8673, 0x0081},
3659{0x8674, 0x0002}, {0x8675, 0x0026}, {0x8676, 0x000b},
3660{0x8677, 0x008a}, {0x8678, 0x0002}, {0x8679, 0x00ce},
3661{0x867a, 0x008f}, {0x867b, 0x0073}, {0x867c, 0x00bd},
3662{0x867d, 0x0089}, {0x867e, 0x007f}, {0x867f, 0x007e},
3663{0x8680, 0x0086}, {0x8681, 0x008e}, {0x8682, 0x0081},
3664{0x8683, 0x0001}, {0x8684, 0x0026}, {0x8685, 0x0008},
3665{0x8686, 0x0086}, {0x8687, 0x0001}, {0x8688, 0x00ce},
3666{0x8689, 0x008f}, {0x868a, 0x0070}, {0x868b, 0x00bd},
3667{0x868c, 0x0089}, {0x868d, 0x007f}, {0x868e, 0x00b6},
3668{0x868f, 0x008f}, {0x8690, 0x007f}, {0x8691, 0x0081},
3669{0x8692, 0x000f}, {0x8693, 0x0026}, {0x8694, 0x0003},
3670{0x8695, 0x007e}, {0x8696, 0x0087}, {0x8697, 0x0047},
3671{0x8698, 0x00b6}, {0x8699, 0x0012}, {0x869a, 0x0009},
3672{0x869b, 0x0084}, {0x869c, 0x0003}, {0x869d, 0x0081},
3673{0x869e, 0x0003}, {0x869f, 0x0027}, {0x86a0, 0x0006},
3674{0x86a1, 0x007c}, {0x86a2, 0x0012}, {0x86a3, 0x0009},
3675{0x86a4, 0x007e}, {0x86a5, 0x0085}, {0x86a6, 0x00fe},
3676{0x86a7, 0x00b6}, {0x86a8, 0x0012}, {0x86a9, 0x0006},
3677{0x86aa, 0x0084}, {0x86ab, 0x0007}, {0x86ac, 0x0081},
3678{0x86ad, 0x0007}, {0x86ae, 0x0027}, {0x86af, 0x0008},
3679{0x86b0, 0x008b}, {0x86b1, 0x0001}, {0x86b2, 0x00b7},
3680{0x86b3, 0x0012}, {0x86b4, 0x0006}, {0x86b5, 0x007e},
3681{0x86b6, 0x0086}, {0x86b7, 0x00d5}, {0x86b8, 0x00b6},
3682{0x86b9, 0x008f}, {0x86ba, 0x0082}, {0x86bb, 0x0026},
3683{0x86bc, 0x000a}, {0x86bd, 0x007c}, {0x86be, 0x008f},
3684{0x86bf, 0x0082}, {0x86c0, 0x004f}, {0x86c1, 0x00b7},
3685{0x86c2, 0x0012}, {0x86c3, 0x0006}, {0x86c4, 0x007e},
3686{0x86c5, 0x0085}, {0x86c6, 0x00c0}, {0x86c7, 0x00b6},
3687{0x86c8, 0x0012}, {0x86c9, 0x0006}, {0x86ca, 0x0084},
3688{0x86cb, 0x003f}, {0x86cc, 0x0081}, {0x86cd, 0x003f},
3689{0x86ce, 0x0027}, {0x86cf, 0x0010}, {0x86d0, 0x008b},
3690{0x86d1, 0x0008}, {0x86d2, 0x00b7}, {0x86d3, 0x0012},
3691{0x86d4, 0x0006}, {0x86d5, 0x00b6}, {0x86d6, 0x0012},
3692{0x86d7, 0x0009}, {0x86d8, 0x0084}, {0x86d9, 0x00fc},
3693{0x86da, 0x00b7}, {0x86db, 0x0012}, {0x86dc, 0x0009},
3694{0x86dd, 0x007e}, {0x86de, 0x0085}, {0x86df, 0x00fe},
3695{0x86e0, 0x00ce}, {0x86e1, 0x008f}, {0x86e2, 0x0070},
3696{0x86e3, 0x0018}, {0x86e4, 0x00ce}, {0x86e5, 0x008f},
3697{0x86e6, 0x0084}, {0x86e7, 0x00c6}, {0x86e8, 0x000c},
3698{0x86e9, 0x00bd}, {0x86ea, 0x0089}, {0x86eb, 0x006f},
3699{0x86ec, 0x00ce}, {0x86ed, 0x008f}, {0x86ee, 0x0084},
3700{0x86ef, 0x0018}, {0x86f0, 0x00ce}, {0x86f1, 0x008f},
3701{0x86f2, 0x0070}, {0x86f3, 0x00c6}, {0x86f4, 0x000c},
3702{0x86f5, 0x00bd}, {0x86f6, 0x0089}, {0x86f7, 0x006f},
3703{0x86f8, 0x00d6}, {0x86f9, 0x0083}, {0x86fa, 0x00c1},
3704{0x86fb, 0x004f}, {0x86fc, 0x002d}, {0x86fd, 0x0003},
3705{0x86fe, 0x007e}, {0x86ff, 0x0087}, {0x8700, 0x0040},
3706{0x8701, 0x00b6}, {0x8702, 0x008f}, {0x8703, 0x007f},
3707{0x8704, 0x0081}, {0x8705, 0x0007}, {0x8706, 0x0027},
3708{0x8707, 0x000f}, {0x8708, 0x0081}, {0x8709, 0x000b},
3709{0x870a, 0x0027}, {0x870b, 0x0015}, {0x870c, 0x0081},
3710{0x870d, 0x000d}, {0x870e, 0x0027}, {0x870f, 0x001b},
3711{0x8710, 0x0081}, {0x8711, 0x000e}, {0x8712, 0x0027},
3712{0x8713, 0x0021}, {0x8714, 0x007e}, {0x8715, 0x0087},
3713{0x8716, 0x0040}, {0x8717, 0x00f7}, {0x8718, 0x008f},
3714{0x8719, 0x007b}, {0x871a, 0x0086}, {0x871b, 0x0002},
3715{0x871c, 0x00b7}, {0x871d, 0x008f}, {0x871e, 0x007a},
3716{0x871f, 0x0020}, {0x8720, 0x001c}, {0x8721, 0x00f7},
3717{0x8722, 0x008f}, {0x8723, 0x0078}, {0x8724, 0x0086},
3718{0x8725, 0x0002}, {0x8726, 0x00b7}, {0x8727, 0x008f},
3719{0x8728, 0x0077}, {0x8729, 0x0020}, {0x872a, 0x0012},
3720{0x872b, 0x00f7}, {0x872c, 0x008f}, {0x872d, 0x0075},
3721{0x872e, 0x0086}, {0x872f, 0x0002}, {0x8730, 0x00b7},
3722{0x8731, 0x008f}, {0x8732, 0x0074}, {0x8733, 0x0020},
3723{0x8734, 0x0008}, {0x8735, 0x00f7}, {0x8736, 0x008f},
3724{0x8737, 0x0072}, {0x8738, 0x0086}, {0x8739, 0x0002},
3725{0x873a, 0x00b7}, {0x873b, 0x008f}, {0x873c, 0x0071},
3726{0x873d, 0x007e}, {0x873e, 0x0087}, {0x873f, 0x0047},
3727{0x8740, 0x0086}, {0x8741, 0x0004}, {0x8742, 0x0097},
3728{0x8743, 0x0040}, {0x8744, 0x007e}, {0x8745, 0x0089},
3729{0x8746, 0x006e}, {0x8747, 0x00ce}, {0x8748, 0x008f},
3730{0x8749, 0x0072}, {0x874a, 0x00bd}, {0x874b, 0x0089},
3731{0x874c, 0x00f7}, {0x874d, 0x00ce}, {0x874e, 0x008f},
3732{0x874f, 0x0075}, {0x8750, 0x00bd}, {0x8751, 0x0089},
3733{0x8752, 0x00f7}, {0x8753, 0x00ce}, {0x8754, 0x008f},
3734{0x8755, 0x0078}, {0x8756, 0x00bd}, {0x8757, 0x0089},
3735{0x8758, 0x00f7}, {0x8759, 0x00ce}, {0x875a, 0x008f},
3736{0x875b, 0x007b}, {0x875c, 0x00bd}, {0x875d, 0x0089},
3737{0x875e, 0x00f7}, {0x875f, 0x004f}, {0x8760, 0x00b7},
3738{0x8761, 0x008f}, {0x8762, 0x007d}, {0x8763, 0x00b7},
3739{0x8764, 0x008f}, {0x8765, 0x0081}, {0x8766, 0x00b6},
3740{0x8767, 0x008f}, {0x8768, 0x0072}, {0x8769, 0x0027},
3741{0x876a, 0x0047}, {0x876b, 0x007c}, {0x876c, 0x008f},
3742{0x876d, 0x007d}, {0x876e, 0x00b6}, {0x876f, 0x008f},
3743{0x8770, 0x0075}, {0x8771, 0x0027}, {0x8772, 0x003f},
3744{0x8773, 0x007c}, {0x8774, 0x008f}, {0x8775, 0x007d},
3745{0x8776, 0x00b6}, {0x8777, 0x008f}, {0x8778, 0x0078},
3746{0x8779, 0x0027}, {0x877a, 0x0037}, {0x877b, 0x007c},
3747{0x877c, 0x008f}, {0x877d, 0x007d}, {0x877e, 0x00b6},
3748{0x877f, 0x008f}, {0x8780, 0x007b}, {0x8781, 0x0027},
3749{0x8782, 0x002f}, {0x8783, 0x007f}, {0x8784, 0x008f},
3750{0x8785, 0x007d}, {0x8786, 0x007c}, {0x8787, 0x008f},
3751{0x8788, 0x0081}, {0x8789, 0x007a}, {0x878a, 0x008f},
3752{0x878b, 0x0072}, {0x878c, 0x0027}, {0x878d, 0x001b},
3753{0x878e, 0x007c}, {0x878f, 0x008f}, {0x8790, 0x007d},
3754{0x8791, 0x007a}, {0x8792, 0x008f}, {0x8793, 0x0075},
3755{0x8794, 0x0027}, {0x8795, 0x0016}, {0x8796, 0x007c},
3756{0x8797, 0x008f}, {0x8798, 0x007d}, {0x8799, 0x007a},
3757{0x879a, 0x008f}, {0x879b, 0x0078}, {0x879c, 0x0027},
3758{0x879d, 0x0011}, {0x879e, 0x007c}, {0x879f, 0x008f},
3759{0x87a0, 0x007d}, {0x87a1, 0x007a}, {0x87a2, 0x008f},
3760{0x87a3, 0x007b}, {0x87a4, 0x0027}, {0x87a5, 0x000c},
3761{0x87a6, 0x007e}, {0x87a7, 0x0087}, {0x87a8, 0x0083},
3762{0x87a9, 0x007a}, {0x87aa, 0x008f}, {0x87ab, 0x0075},
3763{0x87ac, 0x007a}, {0x87ad, 0x008f}, {0x87ae, 0x0078},
3764{0x87af, 0x007a}, {0x87b0, 0x008f}, {0x87b1, 0x007b},
3765{0x87b2, 0x00ce}, {0x87b3, 0x00c1}, {0x87b4, 0x00fc},
3766{0x87b5, 0x00f6}, {0x87b6, 0x008f}, {0x87b7, 0x007d},
3767{0x87b8, 0x003a}, {0x87b9, 0x00a6}, {0x87ba, 0x0000},
3768{0x87bb, 0x00b7}, {0x87bc, 0x0012}, {0x87bd, 0x0070},
3769{0x87be, 0x00b6}, {0x87bf, 0x008f}, {0x87c0, 0x0072},
3770{0x87c1, 0x0026}, {0x87c2, 0x0003}, {0x87c3, 0x007e},
3771{0x87c4, 0x0087}, {0x87c5, 0x00fa}, {0x87c6, 0x00b6},
3772{0x87c7, 0x008f}, {0x87c8, 0x0075}, {0x87c9, 0x0026},
3773{0x87ca, 0x000a}, {0x87cb, 0x0018}, {0x87cc, 0x00ce},
3774{0x87cd, 0x008f}, {0x87ce, 0x0073}, {0x87cf, 0x00bd},
3775{0x87d0, 0x0089}, {0x87d1, 0x00d5}, {0x87d2, 0x007e},
3776{0x87d3, 0x0087}, {0x87d4, 0x00fa}, {0x87d5, 0x00b6},
3777{0x87d6, 0x008f}, {0x87d7, 0x0078}, {0x87d8, 0x0026},
3778{0x87d9, 0x000a}, {0x87da, 0x0018}, {0x87db, 0x00ce},
3779{0x87dc, 0x008f}, {0x87dd, 0x0076}, {0x87de, 0x00bd},
3780{0x87df, 0x0089}, {0x87e0, 0x00d5}, {0x87e1, 0x007e},
3781{0x87e2, 0x0087}, {0x87e3, 0x00fa}, {0x87e4, 0x00b6},
3782{0x87e5, 0x008f}, {0x87e6, 0x007b}, {0x87e7, 0x0026},
3783{0x87e8, 0x000a}, {0x87e9, 0x0018}, {0x87ea, 0x00ce},
3784{0x87eb, 0x008f}, {0x87ec, 0x0079}, {0x87ed, 0x00bd},
3785{0x87ee, 0x0089}, {0x87ef, 0x00d5}, {0x87f0, 0x007e},
3786{0x87f1, 0x0087}, {0x87f2, 0x00fa}, {0x87f3, 0x0086},
3787{0x87f4, 0x0005}, {0x87f5, 0x0097}, {0x87f6, 0x0040},
3788{0x87f7, 0x007e}, {0x87f8, 0x0089}, {0x87f9, 0x006e},
3789{0x87fa, 0x00b6}, {0x87fb, 0x008f}, {0x87fc, 0x0075},
3790{0x87fd, 0x0081}, {0x87fe, 0x0007}, {0x87ff, 0x002e},
3791{0x8800, 0x00f2}, {0x8801, 0x00f6}, {0x8802, 0x0012},
3792{0x8803, 0x0006}, {0x8804, 0x00c4}, {0x8805, 0x00f8},
3793{0x8806, 0x001b}, {0x8807, 0x00b7}, {0x8808, 0x0012},
3794{0x8809, 0x0006}, {0x880a, 0x00b6}, {0x880b, 0x008f},
3795{0x880c, 0x0078}, {0x880d, 0x0081}, {0x880e, 0x0007},
3796{0x880f, 0x002e}, {0x8810, 0x00e2}, {0x8811, 0x0048},
3797{0x8812, 0x0048}, {0x8813, 0x0048}, {0x8814, 0x00f6},
3798{0x8815, 0x0012}, {0x8816, 0x0006}, {0x8817, 0x00c4},
3799{0x8818, 0x00c7}, {0x8819, 0x001b}, {0x881a, 0x00b7},
3800{0x881b, 0x0012}, {0x881c, 0x0006}, {0x881d, 0x00b6},
3801{0x881e, 0x008f}, {0x881f, 0x007b}, {0x8820, 0x0081},
3802{0x8821, 0x0007}, {0x8822, 0x002e}, {0x8823, 0x00cf},
3803{0x8824, 0x00f6}, {0x8825, 0x0012}, {0x8826, 0x0005},
3804{0x8827, 0x00c4}, {0x8828, 0x00f8}, {0x8829, 0x001b},
3805{0x882a, 0x00b7}, {0x882b, 0x0012}, {0x882c, 0x0005},
3806{0x882d, 0x0086}, {0x882e, 0x0000}, {0x882f, 0x00f6},
3807{0x8830, 0x008f}, {0x8831, 0x0071}, {0x8832, 0x00bd},
3808{0x8833, 0x0089}, {0x8834, 0x0094}, {0x8835, 0x0086},
3809{0x8836, 0x0001}, {0x8837, 0x00f6}, {0x8838, 0x008f},
3810{0x8839, 0x0074}, {0x883a, 0x00bd}, {0x883b, 0x0089},
3811{0x883c, 0x0094}, {0x883d, 0x0086}, {0x883e, 0x0002},
3812{0x883f, 0x00f6}, {0x8840, 0x008f}, {0x8841, 0x0077},
3813{0x8842, 0x00bd}, {0x8843, 0x0089}, {0x8844, 0x0094},
3814{0x8845, 0x0086}, {0x8846, 0x0003}, {0x8847, 0x00f6},
3815{0x8848, 0x008f}, {0x8849, 0x007a}, {0x884a, 0x00bd},
3816{0x884b, 0x0089}, {0x884c, 0x0094}, {0x884d, 0x00ce},
3817{0x884e, 0x008f}, {0x884f, 0x0070}, {0x8850, 0x00a6},
3818{0x8851, 0x0001}, {0x8852, 0x0081}, {0x8853, 0x0001},
3819{0x8854, 0x0027}, {0x8855, 0x0007}, {0x8856, 0x0081},
3820{0x8857, 0x0003}, {0x8858, 0x0027}, {0x8859, 0x0003},
3821{0x885a, 0x007e}, {0x885b, 0x0088}, {0x885c, 0x0066},
3822{0x885d, 0x00a6}, {0x885e, 0x0000}, {0x885f, 0x00b8},
3823{0x8860, 0x008f}, {0x8861, 0x0081}, {0x8862, 0x0084},
3824{0x8863, 0x0001}, {0x8864, 0x0026}, {0x8865, 0x000b},
3825{0x8866, 0x008c}, {0x8867, 0x008f}, {0x8868, 0x0079},
3826{0x8869, 0x002c}, {0x886a, 0x000e}, {0x886b, 0x0008},
3827{0x886c, 0x0008}, {0x886d, 0x0008}, {0x886e, 0x007e},
3828{0x886f, 0x0088}, {0x8870, 0x0050}, {0x8871, 0x00b6},
3829{0x8872, 0x0012}, {0x8873, 0x0004}, {0x8874, 0x008a},
3830{0x8875, 0x0040}, {0x8876, 0x00b7}, {0x8877, 0x0012},
3831{0x8878, 0x0004}, {0x8879, 0x00b6}, {0x887a, 0x0012},
3832{0x887b, 0x0004}, {0x887c, 0x0084}, {0x887d, 0x00fb},
3833{0x887e, 0x0084}, {0x887f, 0x00ef}, {0x8880, 0x00b7},
3834{0x8881, 0x0012}, {0x8882, 0x0004}, {0x8883, 0x00b6},
3835{0x8884, 0x0012}, {0x8885, 0x0007}, {0x8886, 0x0036},
3836{0x8887, 0x00b6}, {0x8888, 0x008f}, {0x8889, 0x007c},
3837{0x888a, 0x0048}, {0x888b, 0x0048}, {0x888c, 0x00b7},
3838{0x888d, 0x0012}, {0x888e, 0x0007}, {0x888f, 0x0086},
3839{0x8890, 0x0001}, {0x8891, 0x00ba}, {0x8892, 0x0012},
3840{0x8893, 0x0004}, {0x8894, 0x00b7}, {0x8895, 0x0012},
3841{0x8896, 0x0004}, {0x8897, 0x0001}, {0x8898, 0x0001},
3842{0x8899, 0x0001}, {0x889a, 0x0001}, {0x889b, 0x0001},
3843{0x889c, 0x0001}, {0x889d, 0x0086}, {0x889e, 0x00fe},
3844{0x889f, 0x00b4}, {0x88a0, 0x0012}, {0x88a1, 0x0004},
3845{0x88a2, 0x00b7}, {0x88a3, 0x0012}, {0x88a4, 0x0004},
3846{0x88a5, 0x0086}, {0x88a6, 0x0002}, {0x88a7, 0x00ba},
3847{0x88a8, 0x0012}, {0x88a9, 0x0004}, {0x88aa, 0x00b7},
3848{0x88ab, 0x0012}, {0x88ac, 0x0004}, {0x88ad, 0x0086},
3849{0x88ae, 0x00fd}, {0x88af, 0x00b4}, {0x88b0, 0x0012},
3850{0x88b1, 0x0004}, {0x88b2, 0x00b7}, {0x88b3, 0x0012},
3851{0x88b4, 0x0004}, {0x88b5, 0x0032}, {0x88b6, 0x00b7},
3852{0x88b7, 0x0012}, {0x88b8, 0x0007}, {0x88b9, 0x00b6},
3853{0x88ba, 0x0012}, {0x88bb, 0x0000}, {0x88bc, 0x0084},
3854{0x88bd, 0x0008}, {0x88be, 0x0081}, {0x88bf, 0x0008},
3855{0x88c0, 0x0027}, {0x88c1, 0x000f}, {0x88c2, 0x007c},
3856{0x88c3, 0x0082}, {0x88c4, 0x0008}, {0x88c5, 0x0026},
3857{0x88c6, 0x0007}, {0x88c7, 0x0086}, {0x88c8, 0x0076},
3858{0x88c9, 0x0097}, {0x88ca, 0x0040}, {0x88cb, 0x007e},
3859{0x88cc, 0x0089}, {0x88cd, 0x006e}, {0x88ce, 0x007e},
3860{0x88cf, 0x0086}, {0x88d0, 0x00ec}, {0x88d1, 0x00b6},
3861{0x88d2, 0x008f}, {0x88d3, 0x007f}, {0x88d4, 0x0081},
3862{0x88d5, 0x000f}, {0x88d6, 0x0027}, {0x88d7, 0x003c},
3863{0x88d8, 0x00bd}, {0x88d9, 0x00e6}, {0x88da, 0x00c7},
3864{0x88db, 0x00b7}, {0x88dc, 0x0012}, {0x88dd, 0x000d},
3865{0x88de, 0x00bd}, {0x88df, 0x00e6}, {0x88e0, 0x00cb},
3866{0x88e1, 0x00b6}, {0x88e2, 0x0012}, {0x88e3, 0x0004},
3867{0x88e4, 0x008a}, {0x88e5, 0x0020}, {0x88e6, 0x00b7},
3868{0x88e7, 0x0012}, {0x88e8, 0x0004}, {0x88e9, 0x00ce},
3869{0x88ea, 0x00ff}, {0x88eb, 0x00ff}, {0x88ec, 0x00b6},
3870{0x88ed, 0x0012}, {0x88ee, 0x0000}, {0x88ef, 0x0081},
3871{0x88f0, 0x000c}, {0x88f1, 0x0026}, {0x88f2, 0x0005},
3872{0x88f3, 0x0009}, {0x88f4, 0x0026}, {0x88f5, 0x00f6},
3873{0x88f6, 0x0027}, {0x88f7, 0x001c}, {0x88f8, 0x00b6},
3874{0x88f9, 0x0012}, {0x88fa, 0x0004}, {0x88fb, 0x0084},
3875{0x88fc, 0x00df}, {0x88fd, 0x00b7}, {0x88fe, 0x0012},
3876{0x88ff, 0x0004}, {0x8900, 0x0096}, {0x8901, 0x0083},
3877{0x8902, 0x0081}, {0x8903, 0x0007}, {0x8904, 0x002c},
3878{0x8905, 0x0005}, {0x8906, 0x007c}, {0x8907, 0x0000},
3879{0x8908, 0x0083}, {0x8909, 0x0020}, {0x890a, 0x0006},
3880{0x890b, 0x0096}, {0x890c, 0x0083}, {0x890d, 0x008b},
3881{0x890e, 0x0008}, {0x890f, 0x0097}, {0x8910, 0x0083},
3882{0x8911, 0x007e}, {0x8912, 0x0085}, {0x8913, 0x0041},
3883{0x8914, 0x007f}, {0x8915, 0x008f}, {0x8916, 0x007e},
3884{0x8917, 0x0086}, {0x8918, 0x0080}, {0x8919, 0x00b7},
3885{0x891a, 0x0012}, {0x891b, 0x000c}, {0x891c, 0x0086},
3886{0x891d, 0x0001}, {0x891e, 0x00b7}, {0x891f, 0x008f},
3887{0x8920, 0x007d}, {0x8921, 0x00b6}, {0x8922, 0x0012},
3888{0x8923, 0x000c}, {0x8924, 0x0084}, {0x8925, 0x007f},
3889{0x8926, 0x00b7}, {0x8927, 0x0012}, {0x8928, 0x000c},
3890{0x8929, 0x008a}, {0x892a, 0x0080}, {0x892b, 0x00b7},
3891{0x892c, 0x0012}, {0x892d, 0x000c}, {0x892e, 0x0086},
3892{0x892f, 0x000a}, {0x8930, 0x00bd}, {0x8931, 0x008a},
3893{0x8932, 0x0006}, {0x8933, 0x00b6}, {0x8934, 0x0012},
3894{0x8935, 0x000a}, {0x8936, 0x002a}, {0x8937, 0x0009},
3895{0x8938, 0x00b6}, {0x8939, 0x0012}, {0x893a, 0x000c},
3896{0x893b, 0x00ba}, {0x893c, 0x008f}, {0x893d, 0x007d},
3897{0x893e, 0x00b7}, {0x893f, 0x0012}, {0x8940, 0x000c},
3898{0x8941, 0x00b6}, {0x8942, 0x008f}, {0x8943, 0x007e},
3899{0x8944, 0x0081}, {0x8945, 0x0060}, {0x8946, 0x0027},
3900{0x8947, 0x001a}, {0x8948, 0x008b}, {0x8949, 0x0020},
3901{0x894a, 0x00b7}, {0x894b, 0x008f}, {0x894c, 0x007e},
3902{0x894d, 0x00b6}, {0x894e, 0x0012}, {0x894f, 0x000c},
3903{0x8950, 0x0084}, {0x8951, 0x009f}, {0x8952, 0x00ba},
3904{0x8953, 0x008f}, {0x8954, 0x007e}, {0x8955, 0x00b7},
3905{0x8956, 0x0012}, {0x8957, 0x000c}, {0x8958, 0x00b6},
3906{0x8959, 0x008f}, {0x895a, 0x007d}, {0x895b, 0x0048},
3907{0x895c, 0x00b7}, {0x895d, 0x008f}, {0x895e, 0x007d},
3908{0x895f, 0x007e}, {0x8960, 0x0089}, {0x8961, 0x0021},
3909{0x8962, 0x00b6}, {0x8963, 0x0012}, {0x8964, 0x0004},
3910{0x8965, 0x008a}, {0x8966, 0x0020}, {0x8967, 0x00b7},
3911{0x8968, 0x0012}, {0x8969, 0x0004}, {0x896a, 0x00bd},
3912{0x896b, 0x008a}, {0x896c, 0x000a}, {0x896d, 0x004f},
3913{0x896e, 0x0039}, {0x896f, 0x00a6}, {0x8970, 0x0000},
3914{0x8971, 0x0018}, {0x8972, 0x00a7}, {0x8973, 0x0000},
3915{0x8974, 0x0008}, {0x8975, 0x0018}, {0x8976, 0x0008},
3916{0x8977, 0x005a}, {0x8978, 0x0026}, {0x8979, 0x00f5},
3917{0x897a, 0x0039}, {0x897b, 0x0036}, {0x897c, 0x006c},
3918{0x897d, 0x0000}, {0x897e, 0x0032}, {0x897f, 0x00ba},
3919{0x8980, 0x008f}, {0x8981, 0x007f}, {0x8982, 0x00b7},
3920{0x8983, 0x008f}, {0x8984, 0x007f}, {0x8985, 0x00b6},
3921{0x8986, 0x0012}, {0x8987, 0x0009}, {0x8988, 0x0084},
3922{0x8989, 0x0003}, {0x898a, 0x00a7}, {0x898b, 0x0001},
3923{0x898c, 0x00b6}, {0x898d, 0x0012}, {0x898e, 0x0006},
3924{0x898f, 0x0084}, {0x8990, 0x003f}, {0x8991, 0x00a7},
3925{0x8992, 0x0002}, {0x8993, 0x0039}, {0x8994, 0x0036},
3926{0x8995, 0x0086}, {0x8996, 0x0003}, {0x8997, 0x00b7},
3927{0x8998, 0x008f}, {0x8999, 0x0080}, {0x899a, 0x0032},
3928{0x899b, 0x00c1}, {0x899c, 0x0000}, {0x899d, 0x0026},
3929{0x899e, 0x0006}, {0x899f, 0x00b7}, {0x89a0, 0x008f},
3930{0x89a1, 0x007c}, {0x89a2, 0x007e}, {0x89a3, 0x0089},
3931{0x89a4, 0x00c9}, {0x89a5, 0x00c1}, {0x89a6, 0x0001},
3932{0x89a7, 0x0027}, {0x89a8, 0x0018}, {0x89a9, 0x00c1},
3933{0x89aa, 0x0002}, {0x89ab, 0x0027}, {0x89ac, 0x000c},
3934{0x89ad, 0x00c1}, {0x89ae, 0x0003}, {0x89af, 0x0027},
3935{0x89b0, 0x0000}, {0x89b1, 0x00f6}, {0x89b2, 0x008f},
3936{0x89b3, 0x0080}, {0x89b4, 0x0005}, {0x89b5, 0x0005},
3937{0x89b6, 0x00f7}, {0x89b7, 0x008f}, {0x89b8, 0x0080},
3938{0x89b9, 0x00f6}, {0x89ba, 0x008f}, {0x89bb, 0x0080},
3939{0x89bc, 0x0005}, {0x89bd, 0x0005}, {0x89be, 0x00f7},
3940{0x89bf, 0x008f}, {0x89c0, 0x0080}, {0x89c1, 0x00f6},
3941{0x89c2, 0x008f}, {0x89c3, 0x0080}, {0x89c4, 0x0005},
3942{0x89c5, 0x0005}, {0x89c6, 0x00f7}, {0x89c7, 0x008f},
3943{0x89c8, 0x0080}, {0x89c9, 0x00f6}, {0x89ca, 0x008f},
3944{0x89cb, 0x0080}, {0x89cc, 0x0053}, {0x89cd, 0x00f4},
3945{0x89ce, 0x0012}, {0x89cf, 0x0007}, {0x89d0, 0x001b},
3946{0x89d1, 0x00b7}, {0x89d2, 0x0012}, {0x89d3, 0x0007},
3947{0x89d4, 0x0039}, {0x89d5, 0x00ce}, {0x89d6, 0x008f},
3948{0x89d7, 0x0070}, {0x89d8, 0x00a6}, {0x89d9, 0x0000},
3949{0x89da, 0x0018}, {0x89db, 0x00e6}, {0x89dc, 0x0000},
3950{0x89dd, 0x0018}, {0x89de, 0x00a7}, {0x89df, 0x0000},
3951{0x89e0, 0x00e7}, {0x89e1, 0x0000}, {0x89e2, 0x00a6},
3952{0x89e3, 0x0001}, {0x89e4, 0x0018}, {0x89e5, 0x00e6},
3953{0x89e6, 0x0001}, {0x89e7, 0x0018}, {0x89e8, 0x00a7},
3954{0x89e9, 0x0001}, {0x89ea, 0x00e7}, {0x89eb, 0x0001},
3955{0x89ec, 0x00a6}, {0x89ed, 0x0002}, {0x89ee, 0x0018},
3956{0x89ef, 0x00e6}, {0x89f0, 0x0002}, {0x89f1, 0x0018},
3957{0x89f2, 0x00a7}, {0x89f3, 0x0002}, {0x89f4, 0x00e7},
3958{0x89f5, 0x0002}, {0x89f6, 0x0039}, {0x89f7, 0x00a6},
3959{0x89f8, 0x0000}, {0x89f9, 0x0084}, {0x89fa, 0x0007},
3960{0x89fb, 0x00e6}, {0x89fc, 0x0000}, {0x89fd, 0x00c4},
3961{0x89fe, 0x0038}, {0x89ff, 0x0054}, {0x8a00, 0x0054},
3962{0x8a01, 0x0054}, {0x8a02, 0x001b}, {0x8a03, 0x00a7},
3963{0x8a04, 0x0000}, {0x8a05, 0x0039}, {0x8a06, 0x004a},
3964{0x8a07, 0x0026}, {0x8a08, 0x00fd}, {0x8a09, 0x0039},
3965{0x8a0a, 0x0096}, {0x8a0b, 0x0022}, {0x8a0c, 0x0084},
3966{0x8a0d, 0x000f}, {0x8a0e, 0x0097}, {0x8a0f, 0x0022},
3967{0x8a10, 0x0086}, {0x8a11, 0x0001}, {0x8a12, 0x00b7},
3968{0x8a13, 0x008f}, {0x8a14, 0x0070}, {0x8a15, 0x00b6},
3969{0x8a16, 0x0012}, {0x8a17, 0x0007}, {0x8a18, 0x00b7},
3970{0x8a19, 0x008f}, {0x8a1a, 0x0071}, {0x8a1b, 0x00f6},
3971{0x8a1c, 0x0012}, {0x8a1d, 0x000c}, {0x8a1e, 0x00c4},
3972{0x8a1f, 0x000f}, {0x8a20, 0x00c8}, {0x8a21, 0x000f},
3973{0x8a22, 0x00f7}, {0x8a23, 0x008f}, {0x8a24, 0x0072},
3974{0x8a25, 0x00f6}, {0x8a26, 0x008f}, {0x8a27, 0x0072},
3975{0x8a28, 0x00b6}, {0x8a29, 0x008f}, {0x8a2a, 0x0071},
3976{0x8a2b, 0x0084}, {0x8a2c, 0x0003}, {0x8a2d, 0x0027},
3977{0x8a2e, 0x0014}, {0x8a2f, 0x0081}, {0x8a30, 0x0001},
3978{0x8a31, 0x0027}, {0x8a32, 0x001c}, {0x8a33, 0x0081},
3979{0x8a34, 0x0002}, {0x8a35, 0x0027}, {0x8a36, 0x0024},
3980{0x8a37, 0x00f4}, {0x8a38, 0x008f}, {0x8a39, 0x0070},
3981{0x8a3a, 0x0027}, {0x8a3b, 0x002a}, {0x8a3c, 0x0096},
3982{0x8a3d, 0x0022}, {0x8a3e, 0x008a}, {0x8a3f, 0x0080},
3983{0x8a40, 0x007e}, {0x8a41, 0x008a}, {0x8a42, 0x0064},
3984{0x8a43, 0x00f4}, {0x8a44, 0x008f}, {0x8a45, 0x0070},
3985{0x8a46, 0x0027}, {0x8a47, 0x001e}, {0x8a48, 0x0096},
3986{0x8a49, 0x0022}, {0x8a4a, 0x008a}, {0x8a4b, 0x0010},
3987{0x8a4c, 0x007e}, {0x8a4d, 0x008a}, {0x8a4e, 0x0064},
3988{0x8a4f, 0x00f4}, {0x8a50, 0x008f}, {0x8a51, 0x0070},
3989{0x8a52, 0x0027}, {0x8a53, 0x0012}, {0x8a54, 0x0096},
3990{0x8a55, 0x0022}, {0x8a56, 0x008a}, {0x8a57, 0x0020},
3991{0x8a58, 0x007e}, {0x8a59, 0x008a}, {0x8a5a, 0x0064},
3992{0x8a5b, 0x00f4}, {0x8a5c, 0x008f}, {0x8a5d, 0x0070},
3993{0x8a5e, 0x0027}, {0x8a5f, 0x0006}, {0x8a60, 0x0096},
3994{0x8a61, 0x0022}, {0x8a62, 0x008a}, {0x8a63, 0x0040},
3995{0x8a64, 0x0097}, {0x8a65, 0x0022}, {0x8a66, 0x0074},
3996{0x8a67, 0x008f}, {0x8a68, 0x0071}, {0x8a69, 0x0074},
3997{0x8a6a, 0x008f}, {0x8a6b, 0x0071}, {0x8a6c, 0x0078},
3998{0x8a6d, 0x008f}, {0x8a6e, 0x0070}, {0x8a6f, 0x00b6},
3999{0x8a70, 0x008f}, {0x8a71, 0x0070}, {0x8a72, 0x0085},
4000{0x8a73, 0x0010}, {0x8a74, 0x0027}, {0x8a75, 0x00af},
4001{0x8a76, 0x00d6}, {0x8a77, 0x0022}, {0x8a78, 0x00c4},
4002{0x8a79, 0x0010}, {0x8a7a, 0x0058}, {0x8a7b, 0x00b6},
4003{0x8a7c, 0x0012}, {0x8a7d, 0x0070}, {0x8a7e, 0x0081},
4004{0x8a7f, 0x00e4}, {0x8a80, 0x0027}, {0x8a81, 0x0036},
4005{0x8a82, 0x0081}, {0x8a83, 0x00e1}, {0x8a84, 0x0026},
4006{0x8a85, 0x000c}, {0x8a86, 0x0096}, {0x8a87, 0x0022},
4007{0x8a88, 0x0084}, {0x8a89, 0x0020}, {0x8a8a, 0x0044},
4008{0x8a8b, 0x001b}, {0x8a8c, 0x00d6}, {0x8a8d, 0x0022},
4009{0x8a8e, 0x00c4}, {0x8a8f, 0x00cf}, {0x8a90, 0x0020},
4010{0x8a91, 0x0023}, {0x8a92, 0x0058}, {0x8a93, 0x0081},
4011{0x8a94, 0x00c6}, {0x8a95, 0x0026}, {0x8a96, 0x000d},
4012{0x8a97, 0x0096}, {0x8a98, 0x0022}, {0x8a99, 0x0084},
4013{0x8a9a, 0x0040}, {0x8a9b, 0x0044}, {0x8a9c, 0x0044},
4014{0x8a9d, 0x001b}, {0x8a9e, 0x00d6}, {0x8a9f, 0x0022},
4015{0x8aa0, 0x00c4}, {0x8aa1, 0x00af}, {0x8aa2, 0x0020},
4016{0x8aa3, 0x0011}, {0x8aa4, 0x0058}, {0x8aa5, 0x0081},
4017{0x8aa6, 0x0027}, {0x8aa7, 0x0026}, {0x8aa8, 0x000f},
4018{0x8aa9, 0x0096}, {0x8aaa, 0x0022}, {0x8aab, 0x0084},
4019{0x8aac, 0x0080}, {0x8aad, 0x0044}, {0x8aae, 0x0044},
4020{0x8aaf, 0x0044}, {0x8ab0, 0x001b}, {0x8ab1, 0x00d6},
4021{0x8ab2, 0x0022}, {0x8ab3, 0x00c4}, {0x8ab4, 0x006f},
4022{0x8ab5, 0x001b}, {0x8ab6, 0x0097}, {0x8ab7, 0x0022},
4023{0x8ab8, 0x0039}, {0x8ab9, 0x0027}, {0x8aba, 0x000c},
4024{0x8abb, 0x007c}, {0x8abc, 0x0082}, {0x8abd, 0x0006},
4025{0x8abe, 0x00bd}, {0x8abf, 0x00d9}, {0x8ac0, 0x00ed},
4026{0x8ac1, 0x00b6}, {0x8ac2, 0x0082}, {0x8ac3, 0x0007},
4027{0x8ac4, 0x007e}, {0x8ac5, 0x008a}, {0x8ac6, 0x00b9},
4028{0x8ac7, 0x007f}, {0x8ac8, 0x0082}, {0x8ac9, 0x0006},
4029{0x8aca, 0x0039}, { 0x0, 0x0 }
};
#endif


/* phy types */
#define CAS_PHY_UNKNOWN       0x00
#define CAS_PHY_SERDES        0x01
#define CAS_PHY_MII_MDIO0     0x02
#define CAS_PHY_MII_MDIO1     0x04
#define CAS_PHY_MII(x)        ((x) & (CAS_PHY_MII_MDIO0 | CAS_PHY_MII_MDIO1))
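/* Illustrative note (not part of the original header): CAS_PHY_MII() simply
 * tests whether a phy type is one of the two MDIO-attached MII PHYs, e.g.
 * CAS_PHY_MII(CAS_PHY_MII_MDIO1) is non-zero while CAS_PHY_MII(CAS_PHY_SERDES)
 * evaluates to 0.
 */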

/* _RING_INDEX is the index for the ring sizes to be used. _RING_SIZE
 * is the actual size. the default index for the various rings is
 * 8. NOTE: there are a bunch of alignment constraints for the rings. to
 * deal with that, i just allocate rings to create the desired
 * alignment. here are the constraints:
 *   RX DESC and COMP rings must be 8KB aligned
 *   TX DESC must be 2KB aligned.
 * if you change the numbers, be cognizant of how the alignment will change
 * in INIT_BLOCK as well.
 */

#define DESC_RING_I_TO_S(x)   (32*(1 << (x)))
#define COMP_RING_I_TO_S(x)   (128*(1 << (x)))
#define TX_DESC_RING_INDEX    4  /* 512 = 8k */
#define RX_DESC_RING_INDEX    4  /* 512 = 8k */
#define RX_COMP_RING_INDEX    4  /* 2048 = 64k: should be 4x rx ring size */
4057
4058#if (TX_DESC_RING_INDEX > 8) || (TX_DESC_RING_INDEX < 0)
4059#error TX_DESC_RING_INDEX must be between 0 and 8
4060#endif
4061
4062#if (RX_DESC_RING_INDEX > 8) || (RX_DESC_RING_INDEX < 0)
4063#error RX_DESC_RING_INDEX must be between 0 and 8
4064#endif
4065
4066#if (RX_COMP_RING_INDEX > 8) || (RX_COMP_RING_INDEX < 0)
4067#error RX_COMP_RING_INDEX must be between 0 and 8
4068#endif
4069
4070#define N_TX_RINGS MAX_TX_RINGS /* for QoS */
4071#define N_TX_RINGS_MASK MAX_TX_RINGS_MASK
4072#define N_RX_DESC_RINGS MAX_RX_DESC_RINGS /* 1 for ipsec */
4073#define N_RX_COMP_RINGS 0x1 /* for mult. PCI interrupts */
4074
4075/* number of flows that can go through re-assembly */
4076#define N_RX_FLOWS 64
4077
4078#define TX_DESC_RING_SIZE DESC_RING_I_TO_S(TX_DESC_RING_INDEX)
4079#define RX_DESC_RING_SIZE DESC_RING_I_TO_S(RX_DESC_RING_INDEX)
4080#define RX_COMP_RING_SIZE COMP_RING_I_TO_S(RX_COMP_RING_INDEX)
4081#define TX_DESC_RINGN_INDEX(x) TX_DESC_RING_INDEX
4082#define RX_DESC_RINGN_INDEX(x) RX_DESC_RING_INDEX
4083#define RX_COMP_RINGN_INDEX(x) RX_COMP_RING_INDEX
4084#define TX_DESC_RINGN_SIZE(x) TX_DESC_RING_SIZE
4085#define RX_DESC_RINGN_SIZE(x) RX_DESC_RING_SIZE
4086#define RX_COMP_RINGN_SIZE(x) RX_COMP_RING_SIZE
4087
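To make the index-to-size arithmetic above concrete, here is a small stand-alone user-space sketch (editorial illustration, not part of cassini.h) that reproduces the two conversion macros with the default index of 4; the 16-byte and 32-byte entry sizes come from the cas_tx_desc/cas_rx_desc and cas_rx_comp structures defined further down.

#include <stdio.h>

#define DESC_RING_I_TO_S(x)	(32*(1 << (x)))
#define COMP_RING_I_TO_S(x)	(128*(1 << (x)))

int main(void)
{
	/* index 4: 512 descriptors of 16 bytes -> 8K, and 2048 completions
	 * of 32 bytes -> 64K, matching the "512 = 8k" / "2048 = 64k" notes
	 * and the 8K alignment constraints mentioned above. */
	printf("desc ring: %d entries, %d bytes\n",
	       DESC_RING_I_TO_S(4), DESC_RING_I_TO_S(4) * 16);
	printf("comp ring: %d entries, %d bytes\n",
	       COMP_RING_I_TO_S(4), COMP_RING_I_TO_S(4) * 32);
	return 0;
}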
4088/* convert values */
4089#define CAS_BASE(x, y) (((y) << (x ## _SHIFT)) & (x ## _MASK))
4090#define CAS_VAL(x, y) (((y) & (x ## _MASK)) >> (x ## _SHIFT))
4091#define CAS_TX_RINGN_BASE(y) ((TX_DESC_RINGN_INDEX(y) << \
4092 TX_CFG_DESC_RINGN_SHIFT(y)) & \
4093 TX_CFG_DESC_RINGN_MASK(y))
4094
4095/* min is 2k, but we can't do jumbo frames unless it's at least 8k */
4096#define CAS_MIN_PAGE_SHIFT 11 /* 2048 */
4097#define CAS_JUMBO_PAGE_SHIFT 13 /* 8192 */
4098#define CAS_MAX_PAGE_SHIFT 14 /* 16384 */
4099
4100#define TX_DESC_BUFLEN_MASK 0x0000000000003FFFULL /* buffer length in
4101 bytes. 0 - 9256 */
4102#define TX_DESC_BUFLEN_SHIFT 0
4103#define TX_DESC_CSUM_START_MASK 0x00000000001F8000ULL /* checksum start. #
4104 of bytes to be
4105 skipped before
4106 csum calc begins.
4107 value must be
4108 even */
4109#define TX_DESC_CSUM_START_SHIFT 15
4110#define TX_DESC_CSUM_STUFF_MASK 0x000000001FE00000ULL /* checksum stuff.
4111 byte offset w/in
4112 the pkt for the
4113 1st csum byte.
4114 must be > 8 */
4115#define TX_DESC_CSUM_STUFF_SHIFT 21
4116#define TX_DESC_CSUM_EN 0x0000000020000000ULL /* enable checksum */
4117#define TX_DESC_EOF 0x0000000040000000ULL /* end of frame */
4118#define TX_DESC_SOF 0x0000000080000000ULL /* start of frame */
4119#define TX_DESC_INTME 0x0000000100000000ULL /* interrupt me */
4120#define TX_DESC_NO_CRC 0x0000000200000000ULL /* debugging only.
4121 CRC will not be
4122 inserted into
4123 outgoing frame. */
4124struct cas_tx_desc {
4125 u64 control;
4126 u64 buffer;
4127};
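As a rough illustration of how the CAS_BASE() helper composes one of these control words, here is a hedged stand-alone sketch; make_control() and the argument values in main() are invented for the example and are not part of the driver.

#include <stdint.h>
#include <stdio.h>
typedef uint64_t u64;

#define CAS_BASE(x, y)	(((y) << (x ## _SHIFT)) & (x ## _MASK))

#define TX_DESC_BUFLEN_MASK		0x0000000000003FFFULL
#define TX_DESC_BUFLEN_SHIFT		0
#define TX_DESC_CSUM_START_MASK		0x00000000001F8000ULL
#define TX_DESC_CSUM_START_SHIFT	15
#define TX_DESC_CSUM_STUFF_MASK		0x000000001FE00000ULL
#define TX_DESC_CSUM_STUFF_SHIFT	21
#define TX_DESC_CSUM_EN			0x0000000020000000ULL
#define TX_DESC_EOF			0x0000000040000000ULL
#define TX_DESC_SOF			0x0000000080000000ULL

/* hypothetical helper: control word for a single-fragment frame with
 * hardware checksumming; per the field descriptions above, csum_start
 * must be even and csum_stuff must be > 8. */
static u64 make_control(u64 len, u64 csum_start, u64 csum_stuff)
{
	return CAS_BASE(TX_DESC_BUFLEN, len) |
	       CAS_BASE(TX_DESC_CSUM_START, csum_start) |
	       CAS_BASE(TX_DESC_CSUM_STUFF, csum_stuff) |
	       TX_DESC_CSUM_EN | TX_DESC_SOF | TX_DESC_EOF;
}

int main(void)
{
	/* e.g. a 1514-byte frame, checksum starting 34 bytes in */
	printf("control = %#llx\n",
	       (unsigned long long)make_control(1514, 34, 50));
	return 0;
}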
4128
4129/* descriptor ring for free buffers contains page-sized buffers. the index
4130 * value is not used by the hw in any way. it's just stored and returned in
4131 * the completion ring.
4132 */
4133struct cas_rx_desc {
4134 u64 index;
4135 u64 buffer;
4136};
4137
4138/* received packets are put on the completion ring. */
4139/* word 1 */
4140#define RX_COMP1_DATA_SIZE_MASK 0x0000000007FFE000ULL
4141#define RX_COMP1_DATA_SIZE_SHIFT 13
4142#define RX_COMP1_DATA_OFF_MASK 0x000001FFF8000000ULL
4143#define RX_COMP1_DATA_OFF_SHIFT 27
4144#define RX_COMP1_DATA_INDEX_MASK 0x007FFE0000000000ULL
4145#define RX_COMP1_DATA_INDEX_SHIFT 41
4146#define RX_COMP1_SKIP_MASK 0x0180000000000000ULL
4147#define RX_COMP1_SKIP_SHIFT 55
4148#define RX_COMP1_RELEASE_NEXT 0x0200000000000000ULL
4149#define RX_COMP1_SPLIT_PKT 0x0400000000000000ULL
4150#define RX_COMP1_RELEASE_FLOW 0x0800000000000000ULL
4151#define RX_COMP1_RELEASE_DATA 0x1000000000000000ULL
4152#define RX_COMP1_RELEASE_HDR 0x2000000000000000ULL
4153#define RX_COMP1_TYPE_MASK 0xC000000000000000ULL
4154#define RX_COMP1_TYPE_SHIFT 62
4155
4156/* word 2 */
4157#define RX_COMP2_NEXT_INDEX_MASK 0x00000007FFE00000ULL
4158#define RX_COMP2_NEXT_INDEX_SHIFT 21
4159#define RX_COMP2_HDR_SIZE_MASK 0x00000FF800000000ULL
4160#define RX_COMP2_HDR_SIZE_SHIFT 35
4161#define RX_COMP2_HDR_OFF_MASK 0x0003F00000000000ULL
4162#define RX_COMP2_HDR_OFF_SHIFT 44
4163#define RX_COMP2_HDR_INDEX_MASK 0xFFFC000000000000ULL
4164#define RX_COMP2_HDR_INDEX_SHIFT 50
4165
4166/* word 3 */
4167#define RX_COMP3_SMALL_PKT 0x0000000000000001ULL
4168#define RX_COMP3_JUMBO_PKT 0x0000000000000002ULL
4169#define RX_COMP3_JUMBO_HDR_SPLIT_EN 0x0000000000000004ULL
4170#define RX_COMP3_CSUM_START_MASK 0x000000000007F000ULL
4171#define RX_COMP3_CSUM_START_SHIFT 12
4172#define RX_COMP3_FLOWID_MASK 0x0000000001F80000ULL
4173#define RX_COMP3_FLOWID_SHIFT 19
4174#define RX_COMP3_OPCODE_MASK 0x000000000E000000ULL
4175#define RX_COMP3_OPCODE_SHIFT 25
4176#define RX_COMP3_FORCE_FLAG 0x0000000010000000ULL
4177#define RX_COMP3_NO_ASSIST 0x0000000020000000ULL
4178#define RX_COMP3_LOAD_BAL_MASK 0x000001F800000000ULL
4179#define RX_COMP3_LOAD_BAL_SHIFT 35
4180#define RX_PLUS_COMP3_ENC_PKT 0x0000020000000000ULL /* cas+ */
4181#define RX_COMP3_L3_HEAD_OFF_MASK 0x0000FE0000000000ULL /* cas */
4182#define RX_COMP3_L3_HEAD_OFF_SHIFT 41
4183#define RX_PLUS_COMP_L3_HEAD_OFF_MASK 0x0000FC0000000000ULL /* cas+ */
4184#define RX_PLUS_COMP_L3_HEAD_OFF_SHIFT 42
4185#define RX_COMP3_SAP_MASK 0xFFFF000000000000ULL
4186#define RX_COMP3_SAP_SHIFT 48
4187
4188/* word 4 */
4189#define RX_COMP4_TCP_CSUM_MASK 0x000000000000FFFFULL
4190#define RX_COMP4_TCP_CSUM_SHIFT 0
4191#define RX_COMP4_PKT_LEN_MASK 0x000000003FFF0000ULL
4192#define RX_COMP4_PKT_LEN_SHIFT 16
4193#define RX_COMP4_PERFECT_MATCH_MASK 0x00000003C0000000ULL
4194#define RX_COMP4_PERFECT_MATCH_SHIFT 30
4195#define RX_COMP4_ZERO 0x0000080000000000ULL
4196#define RX_COMP4_HASH_VAL_MASK 0x0FFFF00000000000ULL
4197#define RX_COMP4_HASH_VAL_SHIFT 44
4198#define RX_COMP4_HASH_PASS 0x1000000000000000ULL
4199#define RX_COMP4_BAD 0x4000000000000000ULL
4200#define RX_COMP4_LEN_MISMATCH 0x8000000000000000ULL
4201
4202/* we encode the following: ring/index/release. only 14 bits
4203 * are usable.
4204 * NOTE: the encoding is dependent upon RX_DESC_RING_SIZE and
4205 * MAX_RX_DESC_RINGS. */
4206#define RX_INDEX_NUM_MASK 0x0000000000000FFFULL
4207#define RX_INDEX_NUM_SHIFT 0
4208#define RX_INDEX_RING_MASK 0x0000000000001000ULL
4209#define RX_INDEX_RING_SHIFT 12
4210#define RX_INDEX_RELEASE 0x0000000000002000ULL
4211
4212struct cas_rx_comp {
4213 u64 word1;
4214 u64 word2;
4215 u64 word3;
4216 u64 word4;
4217};
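Conversely, CAS_VAL() extracts fields from the completion words. The stand-alone sketch below (decode_comp() is an invented helper, not driver code) shows how the data-buffer index, data size, and packet length would be pulled out of word 1 and word 4; the returned index is the value previously stored in cas_rx_desc.index, so it still carries the RX_INDEX_NUM/RING/RELEASE encoding described above.

#include <stdint.h>
typedef uint64_t u64;

#define CAS_VAL(x, y)	(((y) & (x ## _MASK)) >> (x ## _SHIFT))

#define RX_COMP1_DATA_SIZE_MASK		0x0000000007FFE000ULL
#define RX_COMP1_DATA_SIZE_SHIFT	13
#define RX_COMP1_DATA_INDEX_MASK	0x007FFE0000000000ULL
#define RX_COMP1_DATA_INDEX_SHIFT	41
#define RX_COMP4_PKT_LEN_MASK		0x000000003FFF0000ULL
#define RX_COMP4_PKT_LEN_SHIFT		16

struct cas_rx_comp {	/* mirrors the structure above */
	u64 word1, word2, word3, word4;
};

/* invented helper: unpack the fields a receive path would need first */
void decode_comp(const struct cas_rx_comp *rxc,
		 u64 *data_index, u64 *data_size, u64 *pkt_len)
{
	*data_index = CAS_VAL(RX_COMP1_DATA_INDEX, rxc->word1);
	*data_size  = CAS_VAL(RX_COMP1_DATA_SIZE, rxc->word1);
	*pkt_len    = CAS_VAL(RX_COMP4_PKT_LEN, rxc->word4);
}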
4218
4219enum link_state {
4220 link_down = 0, /* No link, will retry */
4221 link_aneg, /* Autoneg in progress */
4222 link_force_try, /* Try Forced link speed */
4223 link_force_ret, /* Forced mode worked, retrying autoneg */
4224 link_force_ok, /* Stay in forced mode */
4225 link_up /* Link is up */
4226};
4227
4228typedef struct cas_page {
4229 struct list_head list;
4230 struct page *buffer;
4231 dma_addr_t dma_addr;
4232 int used;
4233} cas_page_t;
4234
4235
4236/* some alignment constraints:
4237 * TX DESC, RX DESC, and RX COMP must each be 8K aligned.
4238 * TX COMPWB must be 8-byte aligned.
4239 * to accomplish this, here's what we do:
4240 *
4241 * INIT_BLOCK_RX_COMP = 64k (already aligned)
4242 * INIT_BLOCK_RX_DESC = 8k
4243 * INIT_BLOCK_TX = 8k
4244 * INIT_BLOCK_RX1_DESC = 8k
4245 * TX COMPWB
4246 */
4247#define INIT_BLOCK_TX (TX_DESC_RING_SIZE)
4248#define INIT_BLOCK_RX_DESC (RX_DESC_RING_SIZE)
4249#define INIT_BLOCK_RX_COMP (RX_COMP_RING_SIZE)
4250
4251struct cas_init_block {
4252 struct cas_rx_comp rxcs[N_RX_COMP_RINGS][INIT_BLOCK_RX_COMP];
4253 struct cas_rx_desc rxds[N_RX_DESC_RINGS][INIT_BLOCK_RX_DESC];
4254 struct cas_tx_desc txds[N_TX_RINGS][INIT_BLOCK_TX];
4255 u64 tx_compwb;
4256};
4257
4258/* tiny buffers to deal with target abort issue. we allocate a bit
4259 * over so that we don't have target abort issues with these buffers
4260 * as well.
4261 */
4262#define TX_TINY_BUF_LEN 0x100
4263#define TX_TINY_BUF_BLOCK ((INIT_BLOCK_TX + 1)*TX_TINY_BUF_LEN)
4264
4265struct cas_tiny_count {
4266 int nbufs;
4267 int used;
4268};
4269
4270struct cas {
4271 spinlock_t lock; /* for most bits */
4272 spinlock_t tx_lock[N_TX_RINGS]; /* tx bits */
4273 spinlock_t stat_lock[N_TX_RINGS + 1]; /* for stat gathering */
4274 spinlock_t rx_inuse_lock; /* rx inuse list */
4275 spinlock_t rx_spare_lock; /* rx spare list */
4276
4277 void __iomem *regs;
4278 int tx_new[N_TX_RINGS], tx_old[N_TX_RINGS];
4279 int rx_old[N_RX_DESC_RINGS];
4280 int rx_cur[N_RX_COMP_RINGS], rx_new[N_RX_COMP_RINGS];
4281 int rx_last[N_RX_DESC_RINGS];
4282
4283 /* Set when chip is actually in operational state
4284 * (ie. not power managed) */
4285 int hw_running;
4286 int opened;
4287 struct semaphore pm_sem; /* open/close/suspend/resume */
4288
4289 struct cas_init_block *init_block;
4290 struct cas_tx_desc *init_txds[MAX_TX_RINGS];
4291 struct cas_rx_desc *init_rxds[MAX_RX_DESC_RINGS];
4292 struct cas_rx_comp *init_rxcs[MAX_RX_COMP_RINGS];
4293
4294 /* we use sk_buffs for tx and pages for rx. the rx skbuffs
4295 * are there for flow re-assembly. */
4296 struct sk_buff *tx_skbs[N_TX_RINGS][TX_DESC_RING_SIZE];
4297 struct sk_buff_head rx_flows[N_RX_FLOWS];
4298 cas_page_t *rx_pages[N_RX_DESC_RINGS][RX_DESC_RING_SIZE];
4299 struct list_head rx_spare_list, rx_inuse_list;
4300 int rx_spares_needed;
4301
4302 /* for small packets when copying would be quicker than
4303 mapping */
4304 struct cas_tiny_count tx_tiny_use[N_TX_RINGS][TX_DESC_RING_SIZE];
4305 u8 *tx_tiny_bufs[N_TX_RINGS];
4306
4307 u32 msg_enable;
4308
4309 /* N_TX_RINGS must be >= N_RX_DESC_RINGS */
4310 struct net_device_stats net_stats[N_TX_RINGS + 1];
4311
4312 u32 pci_cfg[64 >> 2];
4313 u8 pci_revision;
4314
4315 int phy_type;
4316 int phy_addr;
4317 u32 phy_id;
4318#define CAS_FLAG_1000MB_CAP 0x00000001
4319#define CAS_FLAG_REG_PLUS 0x00000002
4320#define CAS_FLAG_TARGET_ABORT 0x00000004
4321#define CAS_FLAG_SATURN 0x00000008
4322#define CAS_FLAG_RXD_POST_MASK 0x000000F0
4323#define CAS_FLAG_RXD_POST_SHIFT 4
4324#define CAS_FLAG_RXD_POST(x) ((1 << (CAS_FLAG_RXD_POST_SHIFT + (x))) & \
4325 CAS_FLAG_RXD_POST_MASK)
4326#define CAS_FLAG_ENTROPY_DEV 0x00000100
4327#define CAS_FLAG_NO_HW_CSUM 0x00000200
4328 u32 cas_flags;
4329 int packet_min; /* minimum packet size */
4330 int tx_fifo_size;
4331 int rx_fifo_size;
4332 int rx_pause_off;
4333 int rx_pause_on;
4334 int crc_size; /* 4 if half-duplex */
4335
4336 int pci_irq_INTC;
4337 int min_frame_size; /* for tx fifo workaround */
4338
4339 /* page size allocation */
4340 int page_size;
4341 int page_order;
4342 int mtu_stride;
4343
4344 u32 mac_rx_cfg;
4345
4346 /* Autoneg & PHY control */
4347 int link_cntl;
4348 int link_fcntl;
4349 enum link_state lstate;
4350 struct timer_list link_timer;
4351 int timer_ticks;
4352 struct work_struct reset_task;
4353#if 0
4354 atomic_t reset_task_pending;
4355#else
4356 atomic_t reset_task_pending;
4357 atomic_t reset_task_pending_mtu;
4358 atomic_t reset_task_pending_spare;
4359 atomic_t reset_task_pending_all;
4360#endif
4361
4362#ifdef CONFIG_CASSINI_QGE_DEBUG
4363 atomic_t interrupt_seen; /* 1 if any interrupts are getting through */
4364#endif
4365
4366 /* Link-down problem workaround */
4367#define LINK_TRANSITION_UNKNOWN 0
4368#define LINK_TRANSITION_ON_FAILURE 1
4369#define LINK_TRANSITION_STILL_FAILED 2
4370#define LINK_TRANSITION_LINK_UP 3
4371#define LINK_TRANSITION_LINK_CONFIG 4
4372#define LINK_TRANSITION_LINK_DOWN 5
4373#define LINK_TRANSITION_REQUESTED_RESET 6
4374 int link_transition;
4375 int link_transition_jiffies_valid;
4376 unsigned long link_transition_jiffies;
4377
4378 /* Tuning */
4379 u8 orig_cacheline_size; /* value when loaded */
4380#define CAS_PREF_CACHELINE_SIZE 0x20 /* Minimum desired */
4381
4382 /* Diagnostic counters and state. */
4383 int casreg_len; /* reg-space size for dumping */
4384 u64 pause_entered;
4385 u16 pause_last_time_recvd;
4386
4387 dma_addr_t block_dvma, tx_tiny_dvma[N_TX_RINGS];
4388 struct pci_dev *pdev;
4389 struct net_device *dev;
4390};
4391
4392#define TX_DESC_NEXT(r, x) (((x) + 1) & (TX_DESC_RINGN_SIZE(r) - 1))
4393#define RX_DESC_ENTRY(r, x) ((x) & (RX_DESC_RINGN_SIZE(r) - 1))
4394#define RX_COMP_ENTRY(r, x) ((x) & (RX_COMP_RINGN_SIZE(r) - 1))
4395
4396#define TX_BUFF_COUNT(r, x, y) ((x) <= (y) ? ((y) - (x)) : \
4397 (TX_DESC_RINGN_SIZE(r) - (x) + (y)))
4398
4399#define TX_BUFFS_AVAIL(cp, i) ((cp)->tx_old[(i)] <= (cp)->tx_new[(i)] ? \
4400 (cp)->tx_old[(i)] + (TX_DESC_RINGN_SIZE(i) - 1) - (cp)->tx_new[(i)] : \
4401 (cp)->tx_old[(i)] - (cp)->tx_new[(i)] - 1)
4402
4403#define CAS_ALIGN(addr, align) \
4404 (((unsigned long) (addr) + ((align) - 1UL)) & ~((align) - 1))
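A small worked example of the two macros above (the values are arbitrary, and ALIGN_UP simply restates CAS_ALIGN so the sketch compiles on its own): with a 512-entry ring, tx_old = 10 and tx_new = 500 leave 21 free slots, and the formula never reports more than RING_SIZE - 1 free, the usual one-slot-reserved ring convention that keeps "old == new" meaning empty.

#include <stdio.h>

#define RING_SIZE	512	/* TX_DESC_RINGN_SIZE() with the default index */
#define ALIGN_UP(addr, align) \
	(((unsigned long)(addr) + ((align) - 1UL)) & ~((align) - 1))

int main(void)
{
	int tx_old = 10, tx_new = 500;

	/* same formula as TX_BUFFS_AVAIL above */
	int avail = tx_old <= tx_new ?
		tx_old + (RING_SIZE - 1) - tx_new :
		tx_old - tx_new - 1;

	printf("avail = %d\n", avail);				/* 21 */
	printf("%#lx\n", ALIGN_UP(0x12345UL, 0x2000UL));	/* 0x14000 */
	return 0;
}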
4405
4406#define RX_FIFO_SIZE 16384
4407#define EXPANSION_ROM_SIZE 65536
4408
4409#define CAS_MC_EXACT_MATCH_SIZE 15
4410#define CAS_MC_HASH_SIZE 256
4411#define CAS_MC_HASH_MAX (CAS_MC_EXACT_MATCH_SIZE + \
4412 CAS_MC_HASH_SIZE)
4413
4414#define TX_TARGET_ABORT_LEN 0x20
4415#define RX_SWIVEL_OFF_VAL 0x2
4416#define RX_AE_FREEN_VAL(x) (RX_DESC_RINGN_SIZE(x) >> 1)
4417#define RX_AE_COMP_VAL (RX_COMP_RING_SIZE >> 1)
4418#define RX_BLANK_INTR_PKT_VAL 0x05
4419#define RX_BLANK_INTR_TIME_VAL 0x0F
4420#define HP_TCP_THRESH_VAL 1530 /* reduce to enable reassembly */
4421
4422#define RX_SPARE_COUNT (RX_DESC_RING_SIZE >> 1)
4423#define RX_SPARE_RECOVER_VAL (RX_SPARE_COUNT >> 2)
4424
4425#endif /* _CASSINI_H */
diff --git a/drivers/net/cs89x0.c b/drivers/net/cs89x0.c
index cdc07ccd7332..a6078ad9b654 100644
--- a/drivers/net/cs89x0.c
+++ b/drivers/net/cs89x0.c
@@ -140,6 +140,7 @@
140 140
141#include <asm/system.h> 141#include <asm/system.h>
142#include <asm/io.h> 142#include <asm/io.h>
143#include <asm/irq.h>
143#if ALLOW_DMA 144#if ALLOW_DMA
144#include <asm/dma.h> 145#include <asm/dma.h>
145#endif 146#endif
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
index 32d5fabd4b10..a2c4dd4fb221 100644
--- a/drivers/net/ibmveth.c
+++ b/drivers/net/ibmveth.c
@@ -99,7 +99,7 @@ static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance, struct pt_regs
99static inline void ibmveth_schedule_replenishing(struct ibmveth_adapter*); 99static inline void ibmveth_schedule_replenishing(struct ibmveth_adapter*);
100 100
101#ifdef CONFIG_PROC_FS 101#ifdef CONFIG_PROC_FS
102#define IBMVETH_PROC_DIR "ibmveth" 102#define IBMVETH_PROC_DIR "net/ibmveth"
103static struct proc_dir_entry *ibmveth_proc_dir; 103static struct proc_dir_entry *ibmveth_proc_dir;
104#endif 104#endif
105 105
@@ -1010,7 +1010,7 @@ static int __devexit ibmveth_remove(struct vio_dev *dev)
1010#ifdef CONFIG_PROC_FS 1010#ifdef CONFIG_PROC_FS
1011static void ibmveth_proc_register_driver(void) 1011static void ibmveth_proc_register_driver(void)
1012{ 1012{
1013 ibmveth_proc_dir = create_proc_entry(IBMVETH_PROC_DIR, S_IFDIR, proc_net); 1013 ibmveth_proc_dir = proc_mkdir(IBMVETH_PROC_DIR, NULL);
1014 if (ibmveth_proc_dir) { 1014 if (ibmveth_proc_dir) {
1015 SET_MODULE_OWNER(ibmveth_proc_dir); 1015 SET_MODULE_OWNER(ibmveth_proc_dir);
1016 } 1016 }
@@ -1018,7 +1018,7 @@ static void ibmveth_proc_register_driver(void)
1018 1018
1019static void ibmveth_proc_unregister_driver(void) 1019static void ibmveth_proc_unregister_driver(void)
1020{ 1020{
1021 remove_proc_entry(IBMVETH_PROC_DIR, proc_net); 1021 remove_proc_entry(IBMVETH_PROC_DIR, NULL);
1022} 1022}
1023 1023
1024static void *ibmveth_seq_start(struct seq_file *seq, loff_t *pos) 1024static void *ibmveth_seq_start(struct seq_file *seq, loff_t *pos)
diff --git a/drivers/net/irda/vlsi_ir.c b/drivers/net/irda/vlsi_ir.c
index 6d9de626c967..651c5a6578fd 100644
--- a/drivers/net/irda/vlsi_ir.c
+++ b/drivers/net/irda/vlsi_ir.c
@@ -1875,11 +1875,11 @@ static int __init vlsi_mod_init(void)
1875 1875
1876 sirpulse = !!sirpulse; 1876 sirpulse = !!sirpulse;
1877 1877
1878 /* create_proc_entry returns NULL if !CONFIG_PROC_FS. 1878 /* proc_mkdir returns NULL if !CONFIG_PROC_FS.
1879 * Failure to create the procfs entry is handled like running 1879 * Failure to create the procfs entry is handled like running
1880 * without procfs - it's not required for the driver to work. 1880 * without procfs - it's not required for the driver to work.
1881 */ 1881 */
1882 vlsi_proc_root = create_proc_entry(PROC_DIR, S_IFDIR, NULL); 1882 vlsi_proc_root = proc_mkdir(PROC_DIR, NULL);
1883 if (vlsi_proc_root) { 1883 if (vlsi_proc_root) {
1884 /* protect registered procdir against module removal. 1884 /* protect registered procdir against module removal.
1885 * Because we are in the module init path there's no race 1885 * Because we are in the module init path there's no race
diff --git a/drivers/net/ns83820.c b/drivers/net/ns83820.c
index ed72a23c85dd..bc354a80c099 100644
--- a/drivers/net/ns83820.c
+++ b/drivers/net/ns83820.c
@@ -584,7 +584,7 @@ static inline int ns83820_add_rx_skb(struct ns83820 *dev, struct sk_buff *skb)
584 return 0; 584 return 0;
585} 585}
586 586
587static inline int rx_refill(struct net_device *ndev, int gfp) 587static inline int rx_refill(struct net_device *ndev, unsigned int gfp)
588{ 588{
589 struct ns83820 *dev = PRIV(ndev); 589 struct ns83820 *dev = PRIV(ndev);
590 unsigned i; 590 unsigned i;
diff --git a/drivers/net/pcmcia/smc91c92_cs.c b/drivers/net/pcmcia/smc91c92_cs.c
index d652e1eddb45..c7cca842e5ee 100644
--- a/drivers/net/pcmcia/smc91c92_cs.c
+++ b/drivers/net/pcmcia/smc91c92_cs.c
@@ -1832,7 +1832,7 @@ static void fill_multicast_tbl(int count, struct dev_mc_list *addrs,
1832{ 1832{
1833 struct dev_mc_list *mc_addr; 1833 struct dev_mc_list *mc_addr;
1834 1834
1835 for (mc_addr = addrs; mc_addr && --count > 0; mc_addr = mc_addr->next) { 1835 for (mc_addr = addrs; mc_addr && count-- > 0; mc_addr = mc_addr->next) {
1836 u_int position = ether_crc(6, mc_addr->dmi_addr); 1836 u_int position = ether_crc(6, mc_addr->dmi_addr);
1837#ifndef final_version /* Verify multicast address. */ 1837#ifndef final_version /* Verify multicast address. */
1838 if ((mc_addr->dmi_addr[0] & 1) == 0) 1838 if ((mc_addr->dmi_addr[0] & 1) == 0)
diff --git a/drivers/net/pppoe.c b/drivers/net/pppoe.c
index 82f236cc3b9b..a842ecc60a34 100644
--- a/drivers/net/pppoe.c
+++ b/drivers/net/pppoe.c
@@ -1070,7 +1070,7 @@ static int __init pppoe_proc_init(void)
1070{ 1070{
1071 struct proc_dir_entry *p; 1071 struct proc_dir_entry *p;
1072 1072
1073 p = create_proc_entry("pppoe", S_IRUGO, proc_net); 1073 p = create_proc_entry("net/pppoe", S_IRUGO, NULL);
1074 if (!p) 1074 if (!p)
1075 return -ENOMEM; 1075 return -ENOMEM;
1076 1076
@@ -1142,7 +1142,7 @@ static void __exit pppoe_exit(void)
1142 dev_remove_pack(&pppoes_ptype); 1142 dev_remove_pack(&pppoes_ptype);
1143 dev_remove_pack(&pppoed_ptype); 1143 dev_remove_pack(&pppoed_ptype);
1144 unregister_netdevice_notifier(&pppoe_notifier); 1144 unregister_netdevice_notifier(&pppoe_notifier);
1145 remove_proc_entry("pppoe", proc_net); 1145 remove_proc_entry("net/pppoe", NULL);
1146 proto_unregister(&pppoe_sk_proto); 1146 proto_unregister(&pppoe_sk_proto);
1147} 1147}
1148 1148
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 40c40eba2581..159b56a56ef4 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -92,8 +92,7 @@ VERSION 2.2LK <2005/01/25>
92#endif /* RTL8169_DEBUG */ 92#endif /* RTL8169_DEBUG */
93 93
94#define R8169_MSG_DEFAULT \ 94#define R8169_MSG_DEFAULT \
95 (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | NETIF_MSG_IFUP | \ 95 (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN)
96 NETIF_MSG_IFDOWN)
97 96
98#define TX_BUFFS_AVAIL(tp) \ 97#define TX_BUFFS_AVAIL(tp) \
99 (tp->dirty_tx + NUM_TX_DESC - tp->cur_tx - 1) 98 (tp->dirty_tx + NUM_TX_DESC - tp->cur_tx - 1)
diff --git a/drivers/net/sk98lin/skge.c b/drivers/net/sk98lin/skge.c
index 2e72d79a143c..b18c92cb629e 100644
--- a/drivers/net/sk98lin/skge.c
+++ b/drivers/net/sk98lin/skge.c
@@ -235,7 +235,7 @@ static int SkDrvDeInitAdapter(SK_AC *pAC, int devNbr);
235 * Extern Function Prototypes 235 * Extern Function Prototypes
236 * 236 *
237 ******************************************************************************/ 237 ******************************************************************************/
238static const char SKRootName[] = "sk98lin"; 238static const char SKRootName[] = "net/sk98lin";
239static struct proc_dir_entry *pSkRootDir; 239static struct proc_dir_entry *pSkRootDir;
240extern struct file_operations sk_proc_fops; 240extern struct file_operations sk_proc_fops;
241 241
@@ -5242,20 +5242,20 @@ static int __init skge_init(void)
5242{ 5242{
5243 int error; 5243 int error;
5244 5244
5245 pSkRootDir = proc_mkdir(SKRootName, proc_net); 5245 pSkRootDir = proc_mkdir(SKRootName, NULL);
5246 if (pSkRootDir) 5246 if (pSkRootDir)
5247 pSkRootDir->owner = THIS_MODULE; 5247 pSkRootDir->owner = THIS_MODULE;
5248 5248
5249 error = pci_register_driver(&skge_driver); 5249 error = pci_register_driver(&skge_driver);
5250 if (error) 5250 if (error)
5251 proc_net_remove(SKRootName); 5251 remove_proc_entry(SKRootName, NULL);
5252 return error; 5252 return error;
5253} 5253}
5254 5254
5255static void __exit skge_exit(void) 5255static void __exit skge_exit(void)
5256{ 5256{
5257 pci_unregister_driver(&skge_driver); 5257 pci_unregister_driver(&skge_driver);
5258 proc_net_remove(SKRootName); 5258 remove_proc_entry(SKRootName, NULL);
5259 5259
5260} 5260}
5261 5261
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index 189203c95330..572f121b1f4e 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -1644,6 +1644,22 @@ static void yukon_reset(struct skge_hw *hw, int port)
1644 | GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA); 1644 | GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
1645} 1645}
1646 1646
1647/* Apparently, early versions of Yukon-Lite had wrong chip_id? */
1648static int is_yukon_lite_a0(struct skge_hw *hw)
1649{
1650 u32 reg;
1651 int ret;
1652
1653 if (hw->chip_id != CHIP_ID_YUKON)
1654 return 0;
1655
1656 reg = skge_read32(hw, B2_FAR);
1657 skge_write8(hw, B2_FAR + 3, 0xff);
1658 ret = (skge_read8(hw, B2_FAR + 3) != 0);
1659 skge_write32(hw, B2_FAR, reg);
1660 return ret;
1661}
1662
1647static void yukon_mac_init(struct skge_hw *hw, int port) 1663static void yukon_mac_init(struct skge_hw *hw, int port)
1648{ 1664{
1649 struct skge_port *skge = netdev_priv(hw->dev[port]); 1665 struct skge_port *skge = netdev_priv(hw->dev[port]);
@@ -1759,9 +1775,11 @@ static void yukon_mac_init(struct skge_hw *hw, int port)
1759 /* Configure Rx MAC FIFO */ 1775 /* Configure Rx MAC FIFO */
1760 skge_write16(hw, SK_REG(port, RX_GMF_FL_MSK), RX_FF_FL_DEF_MSK); 1776 skge_write16(hw, SK_REG(port, RX_GMF_FL_MSK), RX_FF_FL_DEF_MSK);
1761 reg = GMF_OPER_ON | GMF_RX_F_FL_ON; 1777 reg = GMF_OPER_ON | GMF_RX_F_FL_ON;
1762 if (hw->chip_id == CHIP_ID_YUKON_LITE && 1778
1763 hw->chip_rev >= CHIP_REV_YU_LITE_A3) 1779 /* disable Rx GMAC FIFO Flush for YUKON-Lite Rev. A0 only */
1780 if (is_yukon_lite_a0(hw))
1764 reg &= ~GMF_RX_F_FL_ON; 1781 reg &= ~GMF_RX_F_FL_ON;
1782
1765 skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_CLR); 1783 skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_CLR);
1766 skge_write16(hw, SK_REG(port, RX_GMF_CTRL_T), reg); 1784 skge_write16(hw, SK_REG(port, RX_GMF_CTRL_T), reg);
1767 /* 1785 /*
@@ -2820,21 +2838,29 @@ static void skge_netpoll(struct net_device *dev)
2820static int skge_set_mac_address(struct net_device *dev, void *p) 2838static int skge_set_mac_address(struct net_device *dev, void *p)
2821{ 2839{
2822 struct skge_port *skge = netdev_priv(dev); 2840 struct skge_port *skge = netdev_priv(dev);
2823 struct sockaddr *addr = p; 2841 struct skge_hw *hw = skge->hw;
2824 int err = 0; 2842 unsigned port = skge->port;
2843 const struct sockaddr *addr = p;
2825 2844
2826 if (!is_valid_ether_addr(addr->sa_data)) 2845 if (!is_valid_ether_addr(addr->sa_data))
2827 return -EADDRNOTAVAIL; 2846 return -EADDRNOTAVAIL;
2828 2847
2829 skge_down(dev); 2848 spin_lock_bh(&hw->phy_lock);
2830 memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); 2849 memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
2831 memcpy_toio(skge->hw->regs + B2_MAC_1 + skge->port*8, 2850 memcpy_toio(hw->regs + B2_MAC_1 + port*8,
2832 dev->dev_addr, ETH_ALEN); 2851 dev->dev_addr, ETH_ALEN);
2833 memcpy_toio(skge->hw->regs + B2_MAC_2 + skge->port*8, 2852 memcpy_toio(hw->regs + B2_MAC_2 + port*8,
2834 dev->dev_addr, ETH_ALEN); 2853 dev->dev_addr, ETH_ALEN);
2835 if (dev->flags & IFF_UP) 2854
2836 err = skge_up(dev); 2855 if (hw->chip_id == CHIP_ID_GENESIS)
2837 return err; 2856 xm_outaddr(hw, port, XM_SA, dev->dev_addr);
2857 else {
2858 gma_set_addr(hw, port, GM_SRC_ADDR_1L, dev->dev_addr);
2859 gma_set_addr(hw, port, GM_SRC_ADDR_2L, dev->dev_addr);
2860 }
2861 spin_unlock_bh(&hw->phy_lock);
2862
2863 return 0;
2838} 2864}
2839 2865
2840static const struct { 2866static const struct {
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 81f4aedf534c..1802c3b48799 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -67,8 +67,8 @@
67 67
68#define DRV_MODULE_NAME "tg3" 68#define DRV_MODULE_NAME "tg3"
69#define PFX DRV_MODULE_NAME ": " 69#define PFX DRV_MODULE_NAME ": "
70#define DRV_MODULE_VERSION "3.40" 70#define DRV_MODULE_VERSION "3.42"
71#define DRV_MODULE_RELDATE "September 15, 2005" 71#define DRV_MODULE_RELDATE "Oct 3, 2005"
72 72
73#define TG3_DEF_MAC_MODE 0 73#define TG3_DEF_MAC_MODE 0
74#define TG3_DEF_RX_MODE 0 74#define TG3_DEF_RX_MODE 0
@@ -3389,7 +3389,8 @@ static irqreturn_t tg3_test_isr(int irq, void *dev_id,
3389 struct tg3 *tp = netdev_priv(dev); 3389 struct tg3 *tp = netdev_priv(dev);
3390 struct tg3_hw_status *sblk = tp->hw_status; 3390 struct tg3_hw_status *sblk = tp->hw_status;
3391 3391
3392 if (sblk->status & SD_STATUS_UPDATED) { 3392 if ((sblk->status & SD_STATUS_UPDATED) ||
3393 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3393 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 3394 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3394 0x00000001); 3395 0x00000001);
3395 return IRQ_RETVAL(1); 3396 return IRQ_RETVAL(1);
@@ -5395,6 +5396,9 @@ static int tg3_set_mac_addr(struct net_device *dev, void *p)
5395 struct tg3 *tp = netdev_priv(dev); 5396 struct tg3 *tp = netdev_priv(dev);
5396 struct sockaddr *addr = p; 5397 struct sockaddr *addr = p;
5397 5398
5399 if (!is_valid_ether_addr(addr->sa_data))
5400 return -EINVAL;
5401
5398 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); 5402 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5399 5403
5400 spin_lock_bh(&tp->lock); 5404 spin_lock_bh(&tp->lock);
@@ -5806,6 +5810,13 @@ static int tg3_reset_hw(struct tg3 *tp)
5806 } 5810 }
5807 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE); 5811 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
5808 5812
5813 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
5814 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
5815 /* reset to prevent losing 1st rx packet intermittently */
5816 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
5817 udelay(10);
5818 }
5819
5809 tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE | 5820 tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
5810 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE; 5821 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
5811 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR); 5822 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
@@ -5937,7 +5948,7 @@ static int tg3_reset_hw(struct tg3 *tp)
5937 tw32(MAC_LED_CTRL, tp->led_ctrl); 5948 tw32(MAC_LED_CTRL, tp->led_ctrl);
5938 5949
5939 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB); 5950 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
5940 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) { 5951 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
5941 tw32_f(MAC_RX_MODE, RX_MODE_RESET); 5952 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
5942 udelay(10); 5953 udelay(10);
5943 } 5954 }
@@ -7360,12 +7371,17 @@ static int tg3_nway_reset(struct net_device *dev)
7360 if (!netif_running(dev)) 7371 if (!netif_running(dev))
7361 return -EAGAIN; 7372 return -EAGAIN;
7362 7373
7374 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
7375 return -EINVAL;
7376
7363 spin_lock_bh(&tp->lock); 7377 spin_lock_bh(&tp->lock);
7364 r = -EINVAL; 7378 r = -EINVAL;
7365 tg3_readphy(tp, MII_BMCR, &bmcr); 7379 tg3_readphy(tp, MII_BMCR, &bmcr);
7366 if (!tg3_readphy(tp, MII_BMCR, &bmcr) && 7380 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
7367 (bmcr & BMCR_ANENABLE)) { 7381 ((bmcr & BMCR_ANENABLE) ||
7368 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART); 7382 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
7383 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
7384 BMCR_ANENABLE);
7369 r = 0; 7385 r = 0;
7370 } 7386 }
7371 spin_unlock_bh(&tp->lock); 7387 spin_unlock_bh(&tp->lock);
@@ -7927,19 +7943,32 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
7927 struct tg3_rx_buffer_desc *desc; 7943 struct tg3_rx_buffer_desc *desc;
7928 7944
7929 if (loopback_mode == TG3_MAC_LOOPBACK) { 7945 if (loopback_mode == TG3_MAC_LOOPBACK) {
7946 /* HW errata - mac loopback fails in some cases on 5780.
7947 * Normal traffic and PHY loopback are not affected by
7948 * errata.
7949 */
7950 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
7951 return 0;
7952
7930 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) | 7953 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
7931 MAC_MODE_PORT_INT_LPBACK | MAC_MODE_LINK_POLARITY | 7954 MAC_MODE_PORT_INT_LPBACK | MAC_MODE_LINK_POLARITY |
7932 MAC_MODE_PORT_MODE_GMII; 7955 MAC_MODE_PORT_MODE_GMII;
7933 tw32(MAC_MODE, mac_mode); 7956 tw32(MAC_MODE, mac_mode);
7934 } else if (loopback_mode == TG3_PHY_LOOPBACK) { 7957 } else if (loopback_mode == TG3_PHY_LOOPBACK) {
7958 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK | BMCR_FULLDPLX |
7959 BMCR_SPEED1000);
7960 udelay(40);
7961 /* reset to prevent losing 1st rx packet intermittently */
7962 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
7963 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7964 udelay(10);
7965 tw32_f(MAC_RX_MODE, tp->rx_mode);
7966 }
7935 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) | 7967 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
7936 MAC_MODE_LINK_POLARITY | MAC_MODE_PORT_MODE_GMII; 7968 MAC_MODE_LINK_POLARITY | MAC_MODE_PORT_MODE_GMII;
7937 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) 7969 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)
7938 mac_mode &= ~MAC_MODE_LINK_POLARITY; 7970 mac_mode &= ~MAC_MODE_LINK_POLARITY;
7939 tw32(MAC_MODE, mac_mode); 7971 tw32(MAC_MODE, mac_mode);
7940
7941 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK | BMCR_FULLDPLX |
7942 BMCR_SPEED1000);
7943 } 7972 }
7944 else 7973 else
7945 return -EINVAL; 7974 return -EINVAL;
@@ -9255,8 +9284,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
9255 static struct pci_device_id write_reorder_chipsets[] = { 9284 static struct pci_device_id write_reorder_chipsets[] = {
9256 { PCI_DEVICE(PCI_VENDOR_ID_AMD, 9285 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
9257 PCI_DEVICE_ID_AMD_FE_GATE_700C) }, 9286 PCI_DEVICE_ID_AMD_FE_GATE_700C) },
9258 { PCI_DEVICE(PCI_VENDOR_ID_AMD, 9287 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
9259 PCI_DEVICE_ID_AMD_K8_NB) }, 9288 PCI_DEVICE_ID_VIA_8385_0) },
9260 { }, 9289 { },
9261 }; 9290 };
9262 u32 misc_ctrl_reg; 9291 u32 misc_ctrl_reg;
@@ -9271,15 +9300,6 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
9271 tp->tg3_flags2 |= TG3_FLG2_SUN_570X; 9300 tp->tg3_flags2 |= TG3_FLG2_SUN_570X;
9272#endif 9301#endif
9273 9302
9274 /* If we have an AMD 762 or K8 chipset, write
9275 * reordering to the mailbox registers done by the host
9276 * controller can cause major troubles. We read back from
9277 * every mailbox register write to force the writes to be
9278 * posted to the chip in order.
9279 */
9280 if (pci_dev_present(write_reorder_chipsets))
9281 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
9282
9283 /* Force memory write invalidate off. If we leave it on, 9303 /* Force memory write invalidate off. If we leave it on,
9284 * then on 5700_BX chips we have to enable a workaround. 9304 * then on 5700_BX chips we have to enable a workaround.
9285 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary 9305 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
@@ -9410,6 +9430,16 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
9410 if (pci_find_capability(tp->pdev, PCI_CAP_ID_EXP) != 0) 9430 if (pci_find_capability(tp->pdev, PCI_CAP_ID_EXP) != 0)
9411 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS; 9431 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
9412 9432
9433 /* If we have an AMD 762 or VIA K8T800 chipset, write
9434 * reordering to the mailbox registers done by the host
9435 * controller can cause major troubles. We read back from
9436 * every mailbox register write to force the writes to be
9437 * posted to the chip in order.
9438 */
9439 if (pci_dev_present(write_reorder_chipsets) &&
9440 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
9441 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
9442
9413 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 && 9443 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
9414 tp->pci_lat_timer < 64) { 9444 tp->pci_lat_timer < 64) {
9415 tp->pci_lat_timer = 64; 9445 tp->pci_lat_timer = 64;
@@ -10324,6 +10354,44 @@ static char * __devinit tg3_phy_string(struct tg3 *tp)
10324 }; 10354 };
10325} 10355}
10326 10356
10357static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
10358{
10359 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
10360 strcpy(str, "PCI Express");
10361 return str;
10362 } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
10363 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
10364
10365 strcpy(str, "PCIX:");
10366
10367 if ((clock_ctrl == 7) ||
10368 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
10369 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
10370 strcat(str, "133MHz");
10371 else if (clock_ctrl == 0)
10372 strcat(str, "33MHz");
10373 else if (clock_ctrl == 2)
10374 strcat(str, "50MHz");
10375 else if (clock_ctrl == 4)
10376 strcat(str, "66MHz");
10377 else if (clock_ctrl == 6)
10378 strcat(str, "100MHz");
10379 else if (clock_ctrl == 7)
10380 strcat(str, "133MHz");
10381 } else {
10382 strcpy(str, "PCI:");
10383 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
10384 strcat(str, "66MHz");
10385 else
10386 strcat(str, "33MHz");
10387 }
10388 if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
10389 strcat(str, ":32-bit");
10390 else
10391 strcat(str, ":64-bit");
10392 return str;
10393}
10394
10327static struct pci_dev * __devinit tg3_find_5704_peer(struct tg3 *tp) 10395static struct pci_dev * __devinit tg3_find_5704_peer(struct tg3 *tp)
10328{ 10396{
10329 struct pci_dev *peer; 10397 struct pci_dev *peer;
@@ -10386,6 +10454,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
10386 struct net_device *dev; 10454 struct net_device *dev;
10387 struct tg3 *tp; 10455 struct tg3 *tp;
10388 int i, err, pci_using_dac, pm_cap; 10456 int i, err, pci_using_dac, pm_cap;
10457 char str[40];
10389 10458
10390 if (tg3_version_printed++ == 0) 10459 if (tg3_version_printed++ == 0)
10391 printk(KERN_INFO "%s", version); 10460 printk(KERN_INFO "%s", version);
@@ -10631,16 +10700,12 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
10631 10700
10632 pci_set_drvdata(pdev, dev); 10701 pci_set_drvdata(pdev, dev);
10633 10702
10634 printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (PCI%s:%s:%s) %sBaseT Ethernet ", 10703 printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (%s) %sBaseT Ethernet ",
10635 dev->name, 10704 dev->name,
10636 tp->board_part_number, 10705 tp->board_part_number,
10637 tp->pci_chip_rev_id, 10706 tp->pci_chip_rev_id,
10638 tg3_phy_string(tp), 10707 tg3_phy_string(tp),
10639 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "X" : ""), 10708 tg3_bus_string(tp, str),
10640 ((tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED) ?
10641 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "133MHz" : "66MHz") :
10642 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "100MHz" : "33MHz")),
10643 ((tp->tg3_flags & TG3_FLAG_PCI_32BIT) ? "32-bit" : "64-bit"),
10644 (tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100" : "10/100/1000"); 10709 (tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100" : "10/100/1000");
10645 10710
10646 for (i = 0; i < 6; i++) 10711 for (i = 0; i < 6; i++)
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index c184b773e585..2e733c60bfa4 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -2246,6 +2246,7 @@ struct tg3 {
2246 (X) == PHY_ID_BCM5411 || (X) == PHY_ID_BCM5701 || \ 2246 (X) == PHY_ID_BCM5411 || (X) == PHY_ID_BCM5701 || \
2247 (X) == PHY_ID_BCM5703 || (X) == PHY_ID_BCM5704 || \ 2247 (X) == PHY_ID_BCM5703 || (X) == PHY_ID_BCM5704 || \
2248 (X) == PHY_ID_BCM5705 || (X) == PHY_ID_BCM5750 || \ 2248 (X) == PHY_ID_BCM5705 || (X) == PHY_ID_BCM5750 || \
2249 (X) == PHY_ID_BCM5752 || (X) == PHY_ID_BCM5780 || \
2249 (X) == PHY_ID_BCM8002) 2250 (X) == PHY_ID_BCM8002)
2250 2251
2251 struct tg3_hw_stats *hw_stats; 2252 struct tg3_hw_stats *hw_stats;
diff --git a/drivers/net/wan/sdlamain.c b/drivers/net/wan/sdlamain.c
index 74e151acef3e..7a8b22a7ea31 100644
--- a/drivers/net/wan/sdlamain.c
+++ b/drivers/net/wan/sdlamain.c
@@ -57,6 +57,7 @@
57#include <linux/ioport.h> /* request_region(), release_region() */ 57#include <linux/ioport.h> /* request_region(), release_region() */
58#include <linux/wanrouter.h> /* WAN router definitions */ 58#include <linux/wanrouter.h> /* WAN router definitions */
59#include <linux/wanpipe.h> /* WANPIPE common user API definitions */ 59#include <linux/wanpipe.h> /* WANPIPE common user API definitions */
60#include <linux/rcupdate.h>
60 61
61#include <linux/in.h> 62#include <linux/in.h>
62#include <asm/io.h> /* phys_to_virt() */ 63#include <asm/io.h> /* phys_to_virt() */
@@ -1268,37 +1269,41 @@ unsigned long get_ip_address(struct net_device *dev, int option)
1268 1269
1269 struct in_ifaddr *ifaddr; 1270 struct in_ifaddr *ifaddr;
1270 struct in_device *in_dev; 1271 struct in_device *in_dev;
1272 unsigned long addr = 0;
1271 1273
1272 if ((in_dev = __in_dev_get(dev)) == NULL){ 1274 rcu_read_lock();
1273 return 0; 1275 if ((in_dev = __in_dev_get_rcu(dev)) == NULL){
1276 goto out;
1274 } 1277 }
1275 1278
1276 if ((ifaddr = in_dev->ifa_list)== NULL ){ 1279 if ((ifaddr = in_dev->ifa_list)== NULL ){
1277 return 0; 1280 goto out;
1278 } 1281 }
1279 1282
1280 switch (option){ 1283 switch (option){
1281 1284
1282 case WAN_LOCAL_IP: 1285 case WAN_LOCAL_IP:
1283 return ifaddr->ifa_local; 1286 addr = ifaddr->ifa_local;
1284 break; 1287 break;
1285 1288
1286 case WAN_POINTOPOINT_IP: 1289 case WAN_POINTOPOINT_IP:
1287 return ifaddr->ifa_address; 1290 addr = ifaddr->ifa_address;
1288 break; 1291 break;
1289 1292
1290 case WAN_NETMASK_IP: 1293 case WAN_NETMASK_IP:
1291 return ifaddr->ifa_mask; 1294 addr = ifaddr->ifa_mask;
1292 break; 1295 break;
1293 1296
1294 case WAN_BROADCAST_IP: 1297 case WAN_BROADCAST_IP:
1295 return ifaddr->ifa_broadcast; 1298 addr = ifaddr->ifa_broadcast;
1296 break; 1299 break;
1297 default: 1300 default:
1298 return 0; 1301 break;
1299 } 1302 }
1300 1303
1301 return 0; 1304out:
1305 rcu_read_unlock();
1306 return addr;
1302} 1307}
1303 1308
1304void add_gateway(sdla_t *card, struct net_device *dev) 1309void add_gateway(sdla_t *card, struct net_device *dev)
diff --git a/drivers/net/wan/syncppp.c b/drivers/net/wan/syncppp.c
index 3731b22f6757..2d1bba06a085 100644
--- a/drivers/net/wan/syncppp.c
+++ b/drivers/net/wan/syncppp.c
@@ -767,7 +767,7 @@ static void sppp_cisco_input (struct sppp *sp, struct sk_buff *skb)
767 u32 addr = 0, mask = ~0; /* FIXME: is the mask correct? */ 767 u32 addr = 0, mask = ~0; /* FIXME: is the mask correct? */
768#ifdef CONFIG_INET 768#ifdef CONFIG_INET
769 rcu_read_lock(); 769 rcu_read_lock();
770 if ((in_dev = __in_dev_get(dev)) != NULL) 770 if ((in_dev = __in_dev_get_rcu(dev)) != NULL)
771 { 771 {
772 for (ifa=in_dev->ifa_list; ifa != NULL; 772 for (ifa=in_dev->ifa_list; ifa != NULL;
773 ifa=ifa->ifa_next) { 773 ifa=ifa->ifa_next) {
diff --git a/drivers/net/wireless/strip.c b/drivers/net/wireless/strip.c
index 4b0acae22b0d..7bc7fc823128 100644
--- a/drivers/net/wireless/strip.c
+++ b/drivers/net/wireless/strip.c
@@ -1352,7 +1352,7 @@ static unsigned char *strip_make_packet(unsigned char *buffer,
1352 struct in_device *in_dev; 1352 struct in_device *in_dev;
1353 1353
1354 rcu_read_lock(); 1354 rcu_read_lock();
1355 in_dev = __in_dev_get(strip_info->dev); 1355 in_dev = __in_dev_get_rcu(strip_info->dev);
1356 if (in_dev == NULL) { 1356 if (in_dev == NULL) {
1357 rcu_read_unlock(); 1357 rcu_read_unlock();
1358 return NULL; 1358 return NULL;
@@ -1508,7 +1508,7 @@ static void strip_send(struct strip *strip_info, struct sk_buff *skb)
1508 1508
1509 brd = addr = 0; 1509 brd = addr = 0;
1510 rcu_read_lock(); 1510 rcu_read_lock();
1511 in_dev = __in_dev_get(strip_info->dev); 1511 in_dev = __in_dev_get_rcu(strip_info->dev);
1512 if (in_dev) { 1512 if (in_dev) {
1513 if (in_dev->ifa_list) { 1513 if (in_dev->ifa_list) {
1514 brd = in_dev->ifa_list->ifa_broadcast; 1514 brd = in_dev->ifa_list->ifa_broadcast;
diff --git a/drivers/parisc/led.c b/drivers/parisc/led.c
index e90fb72a6962..286902298e33 100644
--- a/drivers/parisc/led.c
+++ b/drivers/parisc/led.c
@@ -37,6 +37,7 @@
37#include <linux/proc_fs.h> 37#include <linux/proc_fs.h>
38#include <linux/ctype.h> 38#include <linux/ctype.h>
39#include <linux/blkdev.h> 39#include <linux/blkdev.h>
40#include <linux/rcupdate.h>
40#include <asm/io.h> 41#include <asm/io.h>
41#include <asm/processor.h> 42#include <asm/processor.h>
42#include <asm/hardware.h> 43#include <asm/hardware.h>
@@ -358,9 +359,10 @@ static __inline__ int led_get_net_activity(void)
358 /* we are running as tasklet, so locking dev_base 359 /* we are running as tasklet, so locking dev_base
359 * for reading should be OK */ 360 * for reading should be OK */
360 read_lock(&dev_base_lock); 361 read_lock(&dev_base_lock);
362 rcu_read_lock();
361 for (dev = dev_base; dev; dev = dev->next) { 363 for (dev = dev_base; dev; dev = dev->next) {
362 struct net_device_stats *stats; 364 struct net_device_stats *stats;
363 struct in_device *in_dev = __in_dev_get(dev); 365 struct in_device *in_dev = __in_dev_get_rcu(dev);
364 if (!in_dev || !in_dev->ifa_list) 366 if (!in_dev || !in_dev->ifa_list)
365 continue; 367 continue;
366 if (LOOPBACK(in_dev->ifa_list->ifa_local)) 368 if (LOOPBACK(in_dev->ifa_list->ifa_local))
@@ -371,6 +373,7 @@ static __inline__ int led_get_net_activity(void)
371 rx_total += stats->rx_packets; 373 rx_total += stats->rx_packets;
372 tx_total += stats->tx_packets; 374 tx_total += stats->tx_packets;
373 } 375 }
376 rcu_read_unlock();
374 read_unlock(&dev_base_lock); 377 read_unlock(&dev_base_lock);
375 378
376 retval = 0; 379 retval = 0;
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index c77d5b1bbff6..005786416bb5 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -402,6 +402,12 @@ static void pci_enable_crs(struct pci_dev *dev)
402static void __devinit pci_fixup_parent_subordinate_busnr(struct pci_bus *child, int max) 402static void __devinit pci_fixup_parent_subordinate_busnr(struct pci_bus *child, int max)
403{ 403{
404 struct pci_bus *parent = child->parent; 404 struct pci_bus *parent = child->parent;
405
406 /* Attempts to fix that up are really dangerous unless
407 we're going to re-assign all bus numbers. */
408 if (!pcibios_assign_all_busses())
409 return;
410
405 while (parent->parent && parent->subordinate < max) { 411 while (parent->parent && parent->subordinate < max) {
406 parent->subordinate = max; 412 parent->subordinate = max;
407 pci_write_config_byte(parent->self, PCI_SUBORDINATE_BUS, max); 413 pci_write_config_byte(parent->self, PCI_SUBORDINATE_BUS, max);
@@ -478,8 +484,18 @@ int __devinit pci_scan_bridge(struct pci_bus *bus, struct pci_dev * dev, int max
478 * We need to assign a number to this bus which we always 484 * We need to assign a number to this bus which we always
479 * do in the second pass. 485 * do in the second pass.
480 */ 486 */
481 if (!pass) 487 if (!pass) {
488 if (pcibios_assign_all_busses())
489 /* Temporarily disable forwarding of the
490 configuration cycles on all bridges in
491 this bus segment to avoid possible
492 conflicts in the second pass between two
493 bridges programmed with overlapping
494 bus ranges. */
495 pci_write_config_dword(dev, PCI_PRIMARY_BUS,
496 buses & ~0xffffff);
482 return max; 497 return max;
498 }
483 499
484 /* Clear errors */ 500 /* Clear errors */
485 pci_write_config_word(dev, PCI_STATUS, 0xffff); 501 pci_write_config_word(dev, PCI_STATUS, 0xffff);
diff --git a/drivers/pcmcia/Kconfig b/drivers/pcmcia/Kconfig
index ddc741e6ecbf..36cc9a96a338 100644
--- a/drivers/pcmcia/Kconfig
+++ b/drivers/pcmcia/Kconfig
@@ -146,7 +146,7 @@ config I82365
146 146
147config TCIC 147config TCIC
148 tristate "Databook TCIC host bridge support" 148 tristate "Databook TCIC host bridge support"
149 depends on PCMCIA 149 depends on PCMCIA && ISA
150 select PCCARD_NONSTATIC 150 select PCCARD_NONSTATIC
151 help 151 help
152 Say Y here to include support for the Databook TCIC family of PCMCIA 152 Say Y here to include support for the Databook TCIC family of PCMCIA
diff --git a/drivers/pcmcia/cardbus.c b/drivers/pcmcia/cardbus.c
index 1d755e20880c..3f6d51d11374 100644
--- a/drivers/pcmcia/cardbus.c
+++ b/drivers/pcmcia/cardbus.c
@@ -228,6 +228,11 @@ int cb_alloc(struct pcmcia_socket * s)
228 pci_bus_size_bridges(bus); 228 pci_bus_size_bridges(bus);
229 pci_bus_assign_resources(bus); 229 pci_bus_assign_resources(bus);
230 cardbus_assign_irqs(bus, s->pci_irq); 230 cardbus_assign_irqs(bus, s->pci_irq);
231
232 /* socket specific tune function */
233 if (s->tune_bridge)
234 s->tune_bridge(s, bus);
235
231 pci_enable_bridges(bus); 236 pci_enable_bridges(bus);
232 pci_bus_add_devices(bus); 237 pci_bus_add_devices(bus);
233 238
diff --git a/drivers/pcmcia/omap_cf.c b/drivers/pcmcia/omap_cf.c
index 08d1c9288264..94be9e51654e 100644
--- a/drivers/pcmcia/omap_cf.c
+++ b/drivers/pcmcia/omap_cf.c
@@ -22,7 +22,6 @@
22 22
23#include <asm/hardware.h> 23#include <asm/hardware.h>
24#include <asm/io.h> 24#include <asm/io.h>
25#include <asm/mach-types.h>
26#include <asm/sizes.h> 25#include <asm/sizes.h>
27 26
28#include <asm/arch/mux.h> 27#include <asm/arch/mux.h>
diff --git a/drivers/pcmcia/rsrc_nonstatic.c b/drivers/pcmcia/rsrc_nonstatic.c
index c42455d20eb6..f9a5c70284b5 100644
--- a/drivers/pcmcia/rsrc_nonstatic.c
+++ b/drivers/pcmcia/rsrc_nonstatic.c
@@ -691,7 +691,7 @@ static int adjust_memory(struct pcmcia_socket *s, unsigned int action, unsigned
691 unsigned long size = end - start + 1; 691 unsigned long size = end - start + 1;
692 int ret = 0; 692 int ret = 0;
693 693
694 if (end <= start) 694 if (end < start)
695 return -EINVAL; 695 return -EINVAL;
696 696
697 down(&rsrc_sem); 697 down(&rsrc_sem);
@@ -724,7 +724,7 @@ static int adjust_io(struct pcmcia_socket *s, unsigned int action, unsigned long
724 unsigned long size = end - start + 1; 724 unsigned long size = end - start + 1;
725 int ret = 0; 725 int ret = 0;
726 726
727 if (end <= start) 727 if (end < start)
728 return -EINVAL; 728 return -EINVAL;
729 729
730 if (end > IO_SPACE_LIMIT) 730 if (end > IO_SPACE_LIMIT)
@@ -817,7 +817,7 @@ static int nonstatic_autoadd_resources(struct pcmcia_socket *s)
817 817
818 /* if we got at least one of IO, and one of MEM, we can be glad and 818 /* if we got at least one of IO, and one of MEM, we can be glad and
819 * activate the PCMCIA subsystem */ 819 * activate the PCMCIA subsystem */
820 if (done & (IORESOURCE_MEM | IORESOURCE_IO)) 820 if (done == (IORESOURCE_MEM | IORESOURCE_IO))
821 s->resource_setup_done = 1; 821 s->resource_setup_done = 1;
822 822
823 return 0; 823 return 0;
@@ -925,7 +925,7 @@ static ssize_t store_io_db(struct class_device *class_dev, const char *buf, size
925 return -EINVAL; 925 return -EINVAL;
926 } 926 }
927 } 927 }
928 if (end_addr <= start_addr) 928 if (end_addr < start_addr)
929 return -EINVAL; 929 return -EINVAL;
930 930
931 ret = adjust_io(s, add, start_addr, end_addr); 931 ret = adjust_io(s, add, start_addr, end_addr);
@@ -977,7 +977,7 @@ static ssize_t store_mem_db(struct class_device *class_dev, const char *buf, siz
977 return -EINVAL; 977 return -EINVAL;
978 } 978 }
979 } 979 }
980 if (end_addr <= start_addr) 980 if (end_addr < start_addr)
981 return -EINVAL; 981 return -EINVAL;
982 982
983 ret = adjust_memory(s, add, start_addr, end_addr); 983 ret = adjust_memory(s, add, start_addr, end_addr);
diff --git a/drivers/pcmcia/ti113x.h b/drivers/pcmcia/ti113x.h
index fbe233e19ceb..da0b404561c9 100644
--- a/drivers/pcmcia/ti113x.h
+++ b/drivers/pcmcia/ti113x.h
@@ -59,6 +59,7 @@
59 59
60#define TI122X_SCR_SER_STEP 0xc0000000 60#define TI122X_SCR_SER_STEP 0xc0000000
61#define TI122X_SCR_INTRTIE 0x20000000 61#define TI122X_SCR_INTRTIE 0x20000000
62#define TIXX21_SCR_TIEALL 0x10000000
62#define TI122X_SCR_CBRSVD 0x00400000 63#define TI122X_SCR_CBRSVD 0x00400000
63#define TI122X_SCR_MRBURSTDN 0x00008000 64#define TI122X_SCR_MRBURSTDN 0x00008000
64#define TI122X_SCR_MRBURSTUP 0x00004000 65#define TI122X_SCR_MRBURSTUP 0x00004000
@@ -153,6 +154,12 @@
153/* EnE test register */ 154/* EnE test register */
154#define ENE_TEST_C9 0xc9 /* 8bit */ 155#define ENE_TEST_C9 0xc9 /* 8bit */
155#define ENE_TEST_C9_TLTENABLE 0x02 156#define ENE_TEST_C9_TLTENABLE 0x02
157#define ENE_TEST_C9_PFENABLE_F0 0x04
158#define ENE_TEST_C9_PFENABLE_F1 0x08
 159#define ENE_TEST_C9_PFENABLE (ENE_TEST_C9_PFENABLE_F0 | ENE_TEST_C9_PFENABLE_F1)
 160#define ENE_TEST_C9_WPDISABLE_F0 0x40
 161#define ENE_TEST_C9_WPDISABLE_F1 0x80
 162#define ENE_TEST_C9_WPDISABLE (ENE_TEST_C9_WPDISABLE_F0 | ENE_TEST_C9_WPDISABLE_F1)
156 163
157/* 164/*
158 * Texas Instruments CardBus controller overrides. 165 * Texas Instruments CardBus controller overrides.
@@ -618,6 +625,7 @@ static int ti12xx_2nd_slot_empty(struct yenta_socket *socket)
618 int devfn; 625 int devfn;
619 unsigned int state; 626 unsigned int state;
620 int ret = 1; 627 int ret = 1;
628 u32 sysctl;
621 629
622 /* catch the two-slot controllers */ 630 /* catch the two-slot controllers */
623 switch (socket->dev->device) { 631 switch (socket->dev->device) {
@@ -640,6 +648,24 @@ static int ti12xx_2nd_slot_empty(struct yenta_socket *socket)
640 */ 648 */
641 break; 649 break;
642 650
651 case PCI_DEVICE_ID_TI_X515:
652 case PCI_DEVICE_ID_TI_X420:
653 case PCI_DEVICE_ID_TI_X620:
654 case PCI_DEVICE_ID_TI_XX21_XX11:
655 case PCI_DEVICE_ID_TI_7410:
656 case PCI_DEVICE_ID_TI_7610:
657 /*
658 * those are either single or dual slot CB with additional functions
 659 * like 1394, smartcard reader, etc. check the TIEALL flag for them;
 660 * the TIEALL flag binds the IRQ of all functions together.
661 * we catch the single slot variants later.
662 */
663 sysctl = config_readl(socket, TI113X_SYSTEM_CONTROL);
664 if (sysctl & TIXX21_SCR_TIEALL)
665 return 0;
666
667 break;
668
643 /* single-slot controllers have the 2nd slot empty always :) */ 669 /* single-slot controllers have the 2nd slot empty always :) */
644 default: 670 default:
645 return 1; 671 return 1;
@@ -652,6 +678,15 @@ static int ti12xx_2nd_slot_empty(struct yenta_socket *socket)
652 if (!func) 678 if (!func)
653 return 1; 679 return 1;
654 680
681 /*
682 * check that the device id of both slots match. this is needed for the
683 * XX21 and the XX11 controller that share the same device id for single
684 * and dual slot controllers. return '2nd slot empty'. we already checked
685 * if the interrupt is tied to another function.
686 */
687 if (socket->dev->device != func->device)
688 goto out;
689
655 slot2 = pci_get_drvdata(func); 690 slot2 = pci_get_drvdata(func);
656 if (!slot2) 691 if (!slot2)
657 goto out; 692 goto out;
@@ -791,16 +826,6 @@ static int ti12xx_override(struct yenta_socket *socket)
791 config_writel(socket, TI113X_SYSTEM_CONTROL, val); 826 config_writel(socket, TI113X_SYSTEM_CONTROL, val);
792 827
793 /* 828 /*
794 * for EnE bridges only: clear testbit TLTEnable. this makes the
795 * RME Hammerfall DSP sound card working.
796 */
797 if (socket->dev->vendor == PCI_VENDOR_ID_ENE) {
798 u8 test_c9 = config_readb(socket, ENE_TEST_C9);
799 test_c9 &= ~ENE_TEST_C9_TLTENABLE;
800 config_writeb(socket, ENE_TEST_C9, test_c9);
801 }
802
803 /*
804 * Yenta expects controllers to use CSCINT to route 829 * Yenta expects controllers to use CSCINT to route
805 * CSC interrupts to PCI rather than INTVAL. 830 * CSC interrupts to PCI rather than INTVAL.
806 */ 831 */
@@ -841,5 +866,75 @@ static int ti1250_override(struct yenta_socket *socket)
841 return ti12xx_override(socket); 866 return ti12xx_override(socket);
842} 867}
843 868
869
870/**
871 * EnE specific part. EnE bridges are register compatible with TI bridges but
 872 * have their own test registers and, more importantly, their own little problems.
873 * Some fixup code to make everybody happy (TM).
874 */
875
876/**
877 * set/clear various test bits:
878 * Defaults to clear the bit.
879 * - mask (u8) defines what bits to change
 880 * - bits (u8) are the values to change them to
881 * -> it's
882 * current = (current & ~mask) | bits
883 */
 884/* pci ids of devices that want to have the bit set */
885#define DEVID(_vend,_dev,_subvend,_subdev,mask,bits) { \
886 .vendor = _vend, \
887 .device = _dev, \
888 .subvendor = _subvend, \
889 .subdevice = _subdev, \
890 .driver_data = ((mask) << 8 | (bits)), \
891 }
892static struct pci_device_id ene_tune_tbl[] = {
893 /* Echo Audio products based on motorola DSP56301 and DSP56361 */
894 DEVID(PCI_VENDOR_ID_MOTOROLA, 0x1801, 0xECC0, PCI_ANY_ID,
895 ENE_TEST_C9_TLTENABLE | ENE_TEST_C9_PFENABLE, ENE_TEST_C9_TLTENABLE),
896 DEVID(PCI_VENDOR_ID_MOTOROLA, 0x3410, 0xECC0, PCI_ANY_ID,
897 ENE_TEST_C9_TLTENABLE | ENE_TEST_C9_PFENABLE, ENE_TEST_C9_TLTENABLE),
898
899 {}
900};
901
902static void ene_tune_bridge(struct pcmcia_socket *sock, struct pci_bus *bus)
903{
904 struct yenta_socket *socket = container_of(sock, struct yenta_socket, socket);
905 struct pci_dev *dev;
906 struct pci_device_id *id = NULL;
907 u8 test_c9, old_c9, mask, bits;
908
909 list_for_each_entry(dev, &bus->devices, bus_list) {
910 id = (struct pci_device_id *) pci_match_id(ene_tune_tbl, dev);
911 if (id)
912 break;
913 }
914
915 test_c9 = old_c9 = config_readb(socket, ENE_TEST_C9);
916 if (id) {
917 mask = (id->driver_data >> 8) & 0xFF;
918 bits = id->driver_data & 0xFF;
919
920 test_c9 = (test_c9 & ~mask) | bits;
921 }
922 else
923 /* default to clear TLTEnable bit, old behaviour */
924 test_c9 &= ~ENE_TEST_C9_TLTENABLE;
925
 926	printk(KERN_INFO "yenta EnE: changing test register 0xC9, %02x -> %02x\n", old_c9, test_c9);
927 config_writeb(socket, ENE_TEST_C9, test_c9);
928}
929
930
931static int ene_override(struct yenta_socket *socket)
932{
933 /* install tune_bridge() function */
934 socket->socket.tune_bridge = ene_tune_bridge;
935
936 return ti1250_override(socket);
937}
938
844#endif /* _LINUX_TI113X_H */ 939#endif /* _LINUX_TI113X_H */
845 940
diff --git a/drivers/pcmcia/yenta_socket.c b/drivers/pcmcia/yenta_socket.c
index ba4d78e5b121..db9f952f9e3c 100644
--- a/drivers/pcmcia/yenta_socket.c
+++ b/drivers/pcmcia/yenta_socket.c
@@ -559,12 +559,6 @@ static void yenta_interrogate(struct yenta_socket *socket)
559static int yenta_sock_init(struct pcmcia_socket *sock) 559static int yenta_sock_init(struct pcmcia_socket *sock)
560{ 560{
561 struct yenta_socket *socket = container_of(sock, struct yenta_socket, socket); 561 struct yenta_socket *socket = container_of(sock, struct yenta_socket, socket);
562 u16 bridge;
563
564 bridge = config_readw(socket, CB_BRIDGE_CONTROL) & ~CB_BRIDGE_INTR;
565 if (!socket->cb_irq)
566 bridge |= CB_BRIDGE_INTR;
567 config_writew(socket, CB_BRIDGE_CONTROL, bridge);
568 562
569 exca_writeb(socket, I365_GBLCTL, 0x00); 563 exca_writeb(socket, I365_GBLCTL, 0x00);
570 exca_writeb(socket, I365_GENCTL, 0x00); 564 exca_writeb(socket, I365_GENCTL, 0x00);
@@ -819,6 +813,7 @@ enum {
819 CARDBUS_TYPE_TOPIC95, 813 CARDBUS_TYPE_TOPIC95,
820 CARDBUS_TYPE_TOPIC97, 814 CARDBUS_TYPE_TOPIC97,
821 CARDBUS_TYPE_O2MICRO, 815 CARDBUS_TYPE_O2MICRO,
816 CARDBUS_TYPE_ENE,
822}; 817};
823 818
824/* 819/*
@@ -865,6 +860,12 @@ static struct cardbus_type cardbus_type[] = {
865 .override = o2micro_override, 860 .override = o2micro_override,
866 .restore_state = o2micro_restore_state, 861 .restore_state = o2micro_restore_state,
867 }, 862 },
863 [CARDBUS_TYPE_ENE] = {
864 .override = ene_override,
865 .save_state = ti_save_state,
866 .restore_state = ti_restore_state,
867 .sock_init = ti_init,
868 },
868}; 869};
869 870
870 871
@@ -883,16 +884,8 @@ static unsigned int yenta_probe_irq(struct yenta_socket *socket, u32 isa_irq_mas
883{ 884{
884 int i; 885 int i;
885 unsigned long val; 886 unsigned long val;
886 u16 bridge_ctrl;
887 u32 mask; 887 u32 mask;
888 888
889 /* Set up ISA irq routing to probe the ISA irqs.. */
890 bridge_ctrl = config_readw(socket, CB_BRIDGE_CONTROL);
891 if (!(bridge_ctrl & CB_BRIDGE_INTR)) {
892 bridge_ctrl |= CB_BRIDGE_INTR;
893 config_writew(socket, CB_BRIDGE_CONTROL, bridge_ctrl);
894 }
895
896 /* 889 /*
897 * Probe for usable interrupts using the force 890 * Probe for usable interrupts using the force
898 * register to generate bogus card status events. 891 * register to generate bogus card status events.
@@ -914,9 +907,6 @@ static unsigned int yenta_probe_irq(struct yenta_socket *socket, u32 isa_irq_mas
914 907
915 mask = probe_irq_mask(val) & 0xffff; 908 mask = probe_irq_mask(val) & 0xffff;
916 909
917 bridge_ctrl &= ~CB_BRIDGE_INTR;
918 config_writew(socket, CB_BRIDGE_CONTROL, bridge_ctrl);
919
920 return mask; 910 return mask;
921} 911}
922 912
@@ -944,18 +934,11 @@ static irqreturn_t yenta_probe_handler(int irq, void *dev_id, struct pt_regs *re
944/* probes the PCI interrupt, use only on override functions */ 934/* probes the PCI interrupt, use only on override functions */
945static int yenta_probe_cb_irq(struct yenta_socket *socket) 935static int yenta_probe_cb_irq(struct yenta_socket *socket)
946{ 936{
947 u16 bridge_ctrl;
948
949 if (!socket->cb_irq) 937 if (!socket->cb_irq)
950 return -1; 938 return -1;
951 939
952 socket->probe_status = 0; 940 socket->probe_status = 0;
953 941
954 /* disable ISA interrupts */
955 bridge_ctrl = config_readw(socket, CB_BRIDGE_CONTROL);
956 bridge_ctrl &= ~CB_BRIDGE_INTR;
957 config_writew(socket, CB_BRIDGE_CONTROL, bridge_ctrl);
958
959 if (request_irq(socket->cb_irq, yenta_probe_handler, SA_SHIRQ, "yenta", socket)) { 942 if (request_irq(socket->cb_irq, yenta_probe_handler, SA_SHIRQ, "yenta", socket)) {
960 printk(KERN_WARNING "Yenta: request_irq() in yenta_probe_cb_irq() failed!\n"); 943 printk(KERN_WARNING "Yenta: request_irq() in yenta_probe_cb_irq() failed!\n");
961 return -1; 944 return -1;
@@ -966,7 +949,7 @@ static int yenta_probe_cb_irq(struct yenta_socket *socket)
966 cb_writel(socket, CB_SOCKET_EVENT, -1); 949 cb_writel(socket, CB_SOCKET_EVENT, -1);
967 cb_writel(socket, CB_SOCKET_MASK, CB_CSTSMASK); 950 cb_writel(socket, CB_SOCKET_MASK, CB_CSTSMASK);
968 cb_writel(socket, CB_SOCKET_FORCE, CB_FCARDSTS); 951 cb_writel(socket, CB_SOCKET_FORCE, CB_FCARDSTS);
969 952
970 msleep(100); 953 msleep(100);
971 954
972 /* disable interrupts */ 955 /* disable interrupts */
@@ -1004,11 +987,12 @@ static void yenta_config_init(struct yenta_socket *socket)
1004{ 987{
1005 u16 bridge; 988 u16 bridge;
1006 struct pci_dev *dev = socket->dev; 989 struct pci_dev *dev = socket->dev;
990 struct pci_bus_region region;
1007 991
1008 pci_set_power_state(socket->dev, 0); 992 pcibios_resource_to_bus(socket->dev, &region, &dev->resource[0]);
1009 993
1010 config_writel(socket, CB_LEGACY_MODE_BASE, 0); 994 config_writel(socket, CB_LEGACY_MODE_BASE, 0);
1011 config_writel(socket, PCI_BASE_ADDRESS_0, dev->resource[0].start); 995 config_writel(socket, PCI_BASE_ADDRESS_0, region.start);
1012 config_writew(socket, PCI_COMMAND, 996 config_writew(socket, PCI_COMMAND,
1013 PCI_COMMAND_IO | 997 PCI_COMMAND_IO |
1014 PCI_COMMAND_MEMORY | 998 PCI_COMMAND_MEMORY |
@@ -1031,8 +1015,8 @@ static void yenta_config_init(struct yenta_socket *socket)
1031 * - PCI interrupts enabled if a PCI interrupt exists.. 1015 * - PCI interrupts enabled if a PCI interrupt exists..
1032 */ 1016 */
1033 bridge = config_readw(socket, CB_BRIDGE_CONTROL); 1017 bridge = config_readw(socket, CB_BRIDGE_CONTROL);
1034 bridge &= ~(CB_BRIDGE_CRST | CB_BRIDGE_PREFETCH1 | CB_BRIDGE_INTR | CB_BRIDGE_ISAEN | CB_BRIDGE_VGAEN); 1018 bridge &= ~(CB_BRIDGE_CRST | CB_BRIDGE_PREFETCH1 | CB_BRIDGE_ISAEN | CB_BRIDGE_VGAEN);
1035 bridge |= CB_BRIDGE_PREFETCH0 | CB_BRIDGE_POSTEN | CB_BRIDGE_INTR; 1019 bridge |= CB_BRIDGE_PREFETCH0 | CB_BRIDGE_POSTEN;
1036 config_writew(socket, CB_BRIDGE_CONTROL, bridge); 1020 config_writew(socket, CB_BRIDGE_CONTROL, bridge);
1037} 1021}
1038 1022
@@ -1265,10 +1249,22 @@ static struct pci_device_id yenta_table [] = {
1265 CB_ID(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_1250, TI1250), 1249 CB_ID(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_1250, TI1250),
1266 CB_ID(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_1410, TI1250), 1250 CB_ID(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_1410, TI1250),
1267 1251
1268 CB_ID(PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_1211, TI12XX), 1252 CB_ID(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_XX21_XX11, TI12XX),
1269 CB_ID(PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_1225, TI12XX), 1253 CB_ID(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_X515, TI12XX),
1270 CB_ID(PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_1410, TI1250), 1254 CB_ID(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_X420, TI12XX),
1271 CB_ID(PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_1420, TI12XX), 1255 CB_ID(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_X620, TI12XX),
1256 CB_ID(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_7410, TI12XX),
1257 CB_ID(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_7510, TI12XX),
1258 CB_ID(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_7610, TI12XX),
1259
1260 CB_ID(PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_710, TI12XX),
1261 CB_ID(PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_712, TI12XX),
1262 CB_ID(PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_720, TI12XX),
1263 CB_ID(PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_722, TI12XX),
1264 CB_ID(PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_1211, ENE),
1265 CB_ID(PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_1225, ENE),
1266 CB_ID(PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_1410, ENE),
1267 CB_ID(PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_1420, ENE),
1272 1268
1273 CB_ID(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_RL5C465, RICOH), 1269 CB_ID(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_RL5C465, RICOH),
1274 CB_ID(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_RL5C466, RICOH), 1270 CB_ID(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_RL5C466, RICOH),
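The yenta_config_init() hunk above stops writing dev->resource[0].start into the BAR and converts the resource to a bus address first: on hosts whose bridges translate addresses, the CPU-visible resource start and the address the device must be programmed with are not the same. A minimal sketch of that conversion, using the 2.6-era pcibios_resource_to_bus() signature seen in the hunk and an illustrative helper name:

#include <linux/pci.h>

/* Sketch only: program BAR0 with the bus-side view of resource 0. */
static void program_bar0(struct pci_dev *dev)
{
	struct pci_bus_region region;

	pcibios_resource_to_bus(dev, &region, &dev->resource[0]);
	pci_write_config_dword(dev, PCI_BASE_ADDRESS_0, (u32)region.start);
}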
diff --git a/drivers/s390/net/qeth.h b/drivers/s390/net/qeth.h
index 2ad4797ce024..9963479ba89f 100644
--- a/drivers/s390/net/qeth.h
+++ b/drivers/s390/net/qeth.h
@@ -686,6 +686,7 @@ struct qeth_seqno {
686 __u32 pdu_hdr; 686 __u32 pdu_hdr;
687 __u32 pdu_hdr_ack; 687 __u32 pdu_hdr_ack;
688 __u16 ipa; 688 __u16 ipa;
689 __u32 pkt_seqno;
689}; 690};
690 691
691struct qeth_reply { 692struct qeth_reply {
@@ -848,6 +849,7 @@ qeth_realloc_headroom(struct qeth_card *card, struct sk_buff **skb, int size)
848 "on interface %s", QETH_CARD_IFNAME(card)); 849 "on interface %s", QETH_CARD_IFNAME(card));
849 return -ENOMEM; 850 return -ENOMEM;
850 } 851 }
852 kfree_skb(*skb);
851 *skb = new_skb; 853 *skb = new_skb;
852 } 854 }
853 return 0; 855 return 0;
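The one-line qeth.h hunk adding kfree_skb(*skb) plugs a leak in qeth_realloc_headroom(): once the reallocated skb was swapped in, nothing freed the original. The shape of the fixed pattern, as a sketch with illustrative names; the old buffer is released only after the replacement exists, so a failed allocation leaves the caller's skb untouched.

#include <linux/skbuff.h>
#include <linux/errno.h>

static int replace_skb(struct sk_buff **skb, unsigned int headroom)
{
	struct sk_buff *new_skb = skb_realloc_headroom(*skb, headroom);

	if (!new_skb)
		return -ENOMEM;		/* caller still owns the old skb */

	kfree_skb(*skb);		/* drop the old buffer, avoid the leak */
	*skb = new_skb;
	return 0;
}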
diff --git a/drivers/s390/net/qeth_main.c b/drivers/s390/net/qeth_main.c
index 86582cf1e19e..bd28e2438d7f 100644
--- a/drivers/s390/net/qeth_main.c
+++ b/drivers/s390/net/qeth_main.c
@@ -511,7 +511,7 @@ static int
511__qeth_set_offline(struct ccwgroup_device *cgdev, int recovery_mode) 511__qeth_set_offline(struct ccwgroup_device *cgdev, int recovery_mode)
512{ 512{
513 struct qeth_card *card = (struct qeth_card *) cgdev->dev.driver_data; 513 struct qeth_card *card = (struct qeth_card *) cgdev->dev.driver_data;
514 int rc = 0; 514 int rc = 0, rc2 = 0, rc3 = 0;
515 enum qeth_card_states recover_flag; 515 enum qeth_card_states recover_flag;
516 516
517 QETH_DBF_TEXT(setup, 3, "setoffl"); 517 QETH_DBF_TEXT(setup, 3, "setoffl");
@@ -523,11 +523,13 @@ __qeth_set_offline(struct ccwgroup_device *cgdev, int recovery_mode)
523 CARD_BUS_ID(card)); 523 CARD_BUS_ID(card));
524 return -ERESTARTSYS; 524 return -ERESTARTSYS;
525 } 525 }
526 if ((rc = ccw_device_set_offline(CARD_DDEV(card))) || 526 rc = ccw_device_set_offline(CARD_DDEV(card));
527 (rc = ccw_device_set_offline(CARD_WDEV(card))) || 527 rc2 = ccw_device_set_offline(CARD_WDEV(card));
528 (rc = ccw_device_set_offline(CARD_RDEV(card)))) { 528 rc3 = ccw_device_set_offline(CARD_RDEV(card));
529 if (!rc)
530 rc = (rc2) ? rc2 : rc3;
531 if (rc)
529 QETH_DBF_TEXT_(setup, 2, "1err%d", rc); 532 QETH_DBF_TEXT_(setup, 2, "1err%d", rc);
530 }
531 if (recover_flag == CARD_STATE_UP) 533 if (recover_flag == CARD_STATE_UP)
532 card->state = CARD_STATE_RECOVER; 534 card->state = CARD_STATE_RECOVER;
533 qeth_notify_processes(); 535 qeth_notify_processes();
@@ -1046,6 +1048,7 @@ qeth_setup_card(struct qeth_card *card)
1046 spin_lock_init(&card->vlanlock); 1048 spin_lock_init(&card->vlanlock);
1047 card->vlangrp = NULL; 1049 card->vlangrp = NULL;
1048#endif 1050#endif
1051 spin_lock_init(&card->lock);
1049 spin_lock_init(&card->ip_lock); 1052 spin_lock_init(&card->ip_lock);
1050 spin_lock_init(&card->thread_mask_lock); 1053 spin_lock_init(&card->thread_mask_lock);
1051 card->thread_start_mask = 0; 1054 card->thread_start_mask = 0;
@@ -1626,16 +1629,6 @@ qeth_cmd_timeout(unsigned long data)
1626 spin_unlock_irqrestore(&reply->card->lock, flags); 1629 spin_unlock_irqrestore(&reply->card->lock, flags);
1627} 1630}
1628 1631
1629static void
1630qeth_reset_ip_addresses(struct qeth_card *card)
1631{
1632 QETH_DBF_TEXT(trace, 2, "rstipadd");
1633
1634 qeth_clear_ip_list(card, 0, 1);
1635 /* this function will also schedule the SET_IP_THREAD */
1636 qeth_set_multicast_list(card->dev);
1637}
1638
1639static struct qeth_ipa_cmd * 1632static struct qeth_ipa_cmd *
1640qeth_check_ipa_data(struct qeth_card *card, struct qeth_cmd_buffer *iob) 1633qeth_check_ipa_data(struct qeth_card *card, struct qeth_cmd_buffer *iob)
1641{ 1634{
@@ -1664,9 +1657,8 @@ qeth_check_ipa_data(struct qeth_card *card, struct qeth_cmd_buffer *iob)
1664 "IP address reset.\n", 1657 "IP address reset.\n",
1665 QETH_CARD_IFNAME(card), 1658 QETH_CARD_IFNAME(card),
1666 card->info.chpid); 1659 card->info.chpid);
1667 card->lan_online = 1;
1668 netif_carrier_on(card->dev); 1660 netif_carrier_on(card->dev);
1669 qeth_reset_ip_addresses(card); 1661 qeth_schedule_recovery(card);
1670 return NULL; 1662 return NULL;
1671 case IPA_CMD_REGISTER_LOCAL_ADDR: 1663 case IPA_CMD_REGISTER_LOCAL_ADDR:
1672 QETH_DBF_TEXT(trace,3, "irla"); 1664 QETH_DBF_TEXT(trace,3, "irla");
@@ -2387,6 +2379,7 @@ qeth_layer2_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
2387 skb_pull(skb, VLAN_HLEN); 2379 skb_pull(skb, VLAN_HLEN);
2388 } 2380 }
2389#endif 2381#endif
2382 *((__u32 *)skb->cb) = ++card->seqno.pkt_seqno;
2390 return vlan_id; 2383 return vlan_id;
2391} 2384}
2392 2385
@@ -3014,7 +3007,7 @@ qeth_alloc_buffer_pool(struct qeth_card *card)
3014 return -ENOMEM; 3007 return -ENOMEM;
3015 } 3008 }
3016 for(j = 0; j < QETH_MAX_BUFFER_ELEMENTS(card); ++j){ 3009 for(j = 0; j < QETH_MAX_BUFFER_ELEMENTS(card); ++j){
3017 ptr = (void *) __get_free_page(GFP_KERNEL); 3010 ptr = (void *) __get_free_page(GFP_KERNEL|GFP_DMA);
3018 if (!ptr) { 3011 if (!ptr) {
3019 while (j > 0) 3012 while (j > 0)
3020 free_page((unsigned long) 3013 free_page((unsigned long)
@@ -3058,7 +3051,8 @@ qeth_alloc_qdio_buffers(struct qeth_card *card)
3058 if (card->qdio.state == QETH_QDIO_ALLOCATED) 3051 if (card->qdio.state == QETH_QDIO_ALLOCATED)
3059 return 0; 3052 return 0;
3060 3053
3061 card->qdio.in_q = kmalloc(sizeof(struct qeth_qdio_q), GFP_KERNEL); 3054 card->qdio.in_q = kmalloc(sizeof(struct qeth_qdio_q),
3055 GFP_KERNEL|GFP_DMA);
3062 if (!card->qdio.in_q) 3056 if (!card->qdio.in_q)
3063 return - ENOMEM; 3057 return - ENOMEM;
3064 QETH_DBF_TEXT(setup, 2, "inq"); 3058 QETH_DBF_TEXT(setup, 2, "inq");
@@ -3083,7 +3077,7 @@ qeth_alloc_qdio_buffers(struct qeth_card *card)
3083 } 3077 }
3084 for (i = 0; i < card->qdio.no_out_queues; ++i){ 3078 for (i = 0; i < card->qdio.no_out_queues; ++i){
3085 card->qdio.out_qs[i] = kmalloc(sizeof(struct qeth_qdio_out_q), 3079 card->qdio.out_qs[i] = kmalloc(sizeof(struct qeth_qdio_out_q),
3086 GFP_KERNEL); 3080 GFP_KERNEL|GFP_DMA);
3087 if (!card->qdio.out_qs[i]){ 3081 if (!card->qdio.out_qs[i]){
3088 while (i > 0) 3082 while (i > 0)
3089 kfree(card->qdio.out_qs[--i]); 3083 kfree(card->qdio.out_qs[--i]);
@@ -5200,7 +5194,7 @@ qeth_free_vlan_addresses4(struct qeth_card *card, unsigned short vid)
5200 if (!card->vlangrp) 5194 if (!card->vlangrp)
5201 return; 5195 return;
5202 rcu_read_lock(); 5196 rcu_read_lock();
5203 in_dev = __in_dev_get(card->vlangrp->vlan_devices[vid]); 5197 in_dev = __in_dev_get_rcu(card->vlangrp->vlan_devices[vid]);
5204 if (!in_dev) 5198 if (!in_dev)
5205 goto out; 5199 goto out;
5206 for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) { 5200 for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) {
@@ -6470,6 +6464,9 @@ qeth_query_ipassists_cb(struct qeth_card *card, struct qeth_reply *reply,
6470 if (cmd->hdr.prot_version == QETH_PROT_IPV4) { 6464 if (cmd->hdr.prot_version == QETH_PROT_IPV4) {
6471 card->options.ipa4.supported_funcs = cmd->hdr.ipa_supported; 6465 card->options.ipa4.supported_funcs = cmd->hdr.ipa_supported;
6472 card->options.ipa4.enabled_funcs = cmd->hdr.ipa_enabled; 6466 card->options.ipa4.enabled_funcs = cmd->hdr.ipa_enabled;
 6467		/* IPv6 support is hard-coded off for Hipersockets */
 6468		if (card->info.type == QETH_CARD_TYPE_IQD)
6469 card->options.ipa4.supported_funcs &= ~IPA_IPV6;
6473 } else { 6470 } else {
6474#ifdef CONFIG_QETH_IPV6 6471#ifdef CONFIG_QETH_IPV6
6475 card->options.ipa6.supported_funcs = cmd->hdr.ipa_supported; 6472 card->options.ipa6.supported_funcs = cmd->hdr.ipa_supported;
@@ -7725,7 +7722,7 @@ qeth_arp_constructor(struct neighbour *neigh)
7725 goto out; 7722 goto out;
7726 7723
7727 rcu_read_lock(); 7724 rcu_read_lock();
7728 in_dev = rcu_dereference(__in_dev_get(dev)); 7725 in_dev = __in_dev_get_rcu(dev);
7729 if (in_dev == NULL) { 7726 if (in_dev == NULL) {
7730 rcu_read_unlock(); 7727 rcu_read_unlock();
7731 return -EINVAL; 7728 return -EINVAL;
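Two themes run through the qeth_main.c hunks above: buffers handed to the channel subsystem are now allocated with GFP_DMA (on s390 that keeps them in low, 31-bit addressable memory), and __qeth_set_offline() no longer short-circuits, so all three ccw devices are taken offline even when one call fails, with the first error reported once. The latter pattern, sketched with illustrative names; the real declaration of ccw_device_set_offline() lives in the s390 ccw headers.

struct ccw_device;					/* opaque in this sketch */
extern int ccw_device_set_offline(struct ccw_device *);

/* Sketch: run every step, remember the first failure, report it once. */
static int set_all_offline(struct ccw_device *rdev, struct ccw_device *wdev,
			   struct ccw_device *ddev)
{
	int rc  = ccw_device_set_offline(ddev);
	int rc2 = ccw_device_set_offline(wdev);
	int rc3 = ccw_device_set_offline(rdev);

	if (!rc)
		rc = rc2 ? rc2 : rc3;
	return rc;
}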
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index a6ac61611f35..a748fbfb6692 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -60,6 +60,7 @@
60 Remove un-needed eh_abort handler. 60 Remove un-needed eh_abort handler.
61 Add support for embedded firmware error strings. 61 Add support for embedded firmware error strings.
62 2.26.02.003 - Correctly handle single sgl's with use_sg=1. 62 2.26.02.003 - Correctly handle single sgl's with use_sg=1.
63 2.26.02.004 - Add support for 9550SX controllers.
63*/ 64*/
64 65
65#include <linux/module.h> 66#include <linux/module.h>
@@ -82,7 +83,7 @@
82#include "3w-9xxx.h" 83#include "3w-9xxx.h"
83 84
84/* Globals */ 85/* Globals */
85#define TW_DRIVER_VERSION "2.26.02.003" 86#define TW_DRIVER_VERSION "2.26.02.004"
86static TW_Device_Extension *twa_device_extension_list[TW_MAX_SLOT]; 87static TW_Device_Extension *twa_device_extension_list[TW_MAX_SLOT];
87static unsigned int twa_device_extension_count; 88static unsigned int twa_device_extension_count;
88static int twa_major = -1; 89static int twa_major = -1;
@@ -892,11 +893,6 @@ static int twa_decode_bits(TW_Device_Extension *tw_dev, u32 status_reg_value)
892 writel(TW_CONTROL_CLEAR_QUEUE_ERROR, TW_CONTROL_REG_ADDR(tw_dev)); 893 writel(TW_CONTROL_CLEAR_QUEUE_ERROR, TW_CONTROL_REG_ADDR(tw_dev));
893 } 894 }
894 895
895 if (status_reg_value & TW_STATUS_SBUF_WRITE_ERROR) {
896 TW_PRINTK(tw_dev->host, TW_DRIVER, 0xf, "SBUF Write Error: clearing");
897 writel(TW_CONTROL_CLEAR_SBUF_WRITE_ERROR, TW_CONTROL_REG_ADDR(tw_dev));
898 }
899
900 if (status_reg_value & TW_STATUS_MICROCONTROLLER_ERROR) { 896 if (status_reg_value & TW_STATUS_MICROCONTROLLER_ERROR) {
901 if (tw_dev->reset_print == 0) { 897 if (tw_dev->reset_print == 0) {
902 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x10, "Microcontroller Error: clearing"); 898 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x10, "Microcontroller Error: clearing");
@@ -930,6 +926,36 @@ out:
930 return retval; 926 return retval;
931} /* End twa_empty_response_queue() */ 927} /* End twa_empty_response_queue() */
932 928
929/* This function will clear the pchip/response queue on 9550SX */
930static int twa_empty_response_queue_large(TW_Device_Extension *tw_dev)
931{
932 u32 status_reg_value, response_que_value;
933 int count = 0, retval = 1;
934
935 if (tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9550SX) {
936 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
937
938 while (((status_reg_value & TW_STATUS_RESPONSE_QUEUE_EMPTY) == 0) && (count < TW_MAX_RESPONSE_DRAIN)) {
939 response_que_value = readl(TW_RESPONSE_QUEUE_REG_ADDR_LARGE(tw_dev));
940 if ((response_que_value & TW_9550SX_DRAIN_COMPLETED) == TW_9550SX_DRAIN_COMPLETED) {
941 /* P-chip settle time */
942 msleep(500);
943 retval = 0;
944 goto out;
945 }
946 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
947 count++;
948 }
949 if (count == TW_MAX_RESPONSE_DRAIN)
950 goto out;
951
952 retval = 0;
953 } else
954 retval = 0;
955out:
956 return retval;
957} /* End twa_empty_response_queue_large() */
958
933/* This function passes sense keys from firmware to scsi layer */ 959/* This function passes sense keys from firmware to scsi layer */
934static int twa_fill_sense(TW_Device_Extension *tw_dev, int request_id, int copy_sense, int print_host) 960static int twa_fill_sense(TW_Device_Extension *tw_dev, int request_id, int copy_sense, int print_host)
935{ 961{
@@ -1613,8 +1639,16 @@ static int twa_reset_sequence(TW_Device_Extension *tw_dev, int soft_reset)
1613 int tries = 0, retval = 1, flashed = 0, do_soft_reset = soft_reset; 1639 int tries = 0, retval = 1, flashed = 0, do_soft_reset = soft_reset;
1614 1640
1615 while (tries < TW_MAX_RESET_TRIES) { 1641 while (tries < TW_MAX_RESET_TRIES) {
1616 if (do_soft_reset) 1642 if (do_soft_reset) {
1617 TW_SOFT_RESET(tw_dev); 1643 TW_SOFT_RESET(tw_dev);
1644 /* Clear pchip/response queue on 9550SX */
1645 if (twa_empty_response_queue_large(tw_dev)) {
 1646				TW_PRINTK(tw_dev->host, TW_DRIVER, 0x36, "Failed to empty large response queue during reset sequence");
1647 do_soft_reset = 1;
1648 tries++;
1649 continue;
1650 }
1651 }
1618 1652
1619 /* Make sure controller is in a good state */ 1653 /* Make sure controller is in a good state */
1620 if (twa_poll_status(tw_dev, TW_STATUS_MICROCONTROLLER_READY | (do_soft_reset == 1 ? TW_STATUS_ATTENTION_INTERRUPT : 0), 60)) { 1654 if (twa_poll_status(tw_dev, TW_STATUS_MICROCONTROLLER_READY | (do_soft_reset == 1 ? TW_STATUS_ATTENTION_INTERRUPT : 0), 60)) {
@@ -2034,7 +2068,10 @@ static int __devinit twa_probe(struct pci_dev *pdev, const struct pci_device_id
2034 goto out_free_device_extension; 2068 goto out_free_device_extension;
2035 } 2069 }
2036 2070
2037 mem_addr = pci_resource_start(pdev, 1); 2071 if (pdev->device == PCI_DEVICE_ID_3WARE_9000)
2072 mem_addr = pci_resource_start(pdev, 1);
2073 else
2074 mem_addr = pci_resource_start(pdev, 2);
2038 2075
2039 /* Save base address */ 2076 /* Save base address */
2040 tw_dev->base_addr = ioremap(mem_addr, PAGE_SIZE); 2077 tw_dev->base_addr = ioremap(mem_addr, PAGE_SIZE);
@@ -2148,6 +2185,8 @@ static void twa_remove(struct pci_dev *pdev)
2148static struct pci_device_id twa_pci_tbl[] __devinitdata = { 2185static struct pci_device_id twa_pci_tbl[] __devinitdata = {
2149 { PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9000, 2186 { PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9000,
2150 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, 2187 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
2188 { PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9550SX,
2189 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
2151 { } 2190 { }
2152}; 2191};
2153MODULE_DEVICE_TABLE(pci, twa_pci_tbl); 2192MODULE_DEVICE_TABLE(pci, twa_pci_tbl);
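The twa_probe() hunk above selects the register BAR by controller generation: the original 9000-series exposes its registers through BAR 1, while the new 9550SX moved them to BAR 2, and the reset path additionally drains the large response queue before the usual status polling. A sketch of the BAR selection, assuming the device IDs the patch itself defines and an illustrative helper name:

#include <linux/pci.h>
#include <asm/io.h>

/* Sketch: map the memory window that holds the controller registers. */
static void __iomem *twa_map_registers(struct pci_dev *pdev)
{
	int bar = (pdev->device == PCI_DEVICE_ID_3WARE_9000) ? 1 : 2;

	return ioremap(pci_resource_start(pdev, bar), PAGE_SIZE);
}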
diff --git a/drivers/scsi/3w-9xxx.h b/drivers/scsi/3w-9xxx.h
index 8c8ecbed3b58..46f22cdc8298 100644
--- a/drivers/scsi/3w-9xxx.h
+++ b/drivers/scsi/3w-9xxx.h
@@ -267,7 +267,6 @@ static twa_message_type twa_error_table[] = {
267#define TW_CONTROL_CLEAR_PARITY_ERROR 0x00800000 267#define TW_CONTROL_CLEAR_PARITY_ERROR 0x00800000
268#define TW_CONTROL_CLEAR_QUEUE_ERROR 0x00400000 268#define TW_CONTROL_CLEAR_QUEUE_ERROR 0x00400000
269#define TW_CONTROL_CLEAR_PCI_ABORT 0x00100000 269#define TW_CONTROL_CLEAR_PCI_ABORT 0x00100000
270#define TW_CONTROL_CLEAR_SBUF_WRITE_ERROR 0x00000008
271 270
272/* Status register bit definitions */ 271/* Status register bit definitions */
273#define TW_STATUS_MAJOR_VERSION_MASK 0xF0000000 272#define TW_STATUS_MAJOR_VERSION_MASK 0xF0000000
@@ -285,9 +284,8 @@ static twa_message_type twa_error_table[] = {
285#define TW_STATUS_MICROCONTROLLER_READY 0x00002000 284#define TW_STATUS_MICROCONTROLLER_READY 0x00002000
286#define TW_STATUS_COMMAND_QUEUE_EMPTY 0x00001000 285#define TW_STATUS_COMMAND_QUEUE_EMPTY 0x00001000
287#define TW_STATUS_EXPECTED_BITS 0x00002000 286#define TW_STATUS_EXPECTED_BITS 0x00002000
288#define TW_STATUS_UNEXPECTED_BITS 0x00F00008 287#define TW_STATUS_UNEXPECTED_BITS 0x00F00000
289#define TW_STATUS_SBUF_WRITE_ERROR 0x00000008 288#define TW_STATUS_VALID_INTERRUPT 0x00DF0000
290#define TW_STATUS_VALID_INTERRUPT 0x00DF0008
291 289
292/* RESPONSE QUEUE BIT DEFINITIONS */ 290/* RESPONSE QUEUE BIT DEFINITIONS */
293#define TW_RESPONSE_ID_MASK 0x00000FF0 291#define TW_RESPONSE_ID_MASK 0x00000FF0
@@ -324,9 +322,9 @@ static twa_message_type twa_error_table[] = {
324 322
325/* Compatibility defines */ 323/* Compatibility defines */
326#define TW_9000_ARCH_ID 0x5 324#define TW_9000_ARCH_ID 0x5
327#define TW_CURRENT_DRIVER_SRL 28 325#define TW_CURRENT_DRIVER_SRL 30
328#define TW_CURRENT_DRIVER_BUILD 9 326#define TW_CURRENT_DRIVER_BUILD 80
329#define TW_CURRENT_DRIVER_BRANCH 4 327#define TW_CURRENT_DRIVER_BRANCH 0
330 328
331/* Phase defines */ 329/* Phase defines */
332#define TW_PHASE_INITIAL 0 330#define TW_PHASE_INITIAL 0
@@ -334,6 +332,7 @@ static twa_message_type twa_error_table[] = {
334#define TW_PHASE_SGLIST 2 332#define TW_PHASE_SGLIST 2
335 333
336/* Misc defines */ 334/* Misc defines */
335#define TW_9550SX_DRAIN_COMPLETED 0xFFFF
337#define TW_SECTOR_SIZE 512 336#define TW_SECTOR_SIZE 512
338#define TW_ALIGNMENT_9000 4 /* 4 bytes */ 337#define TW_ALIGNMENT_9000 4 /* 4 bytes */
339#define TW_ALIGNMENT_9000_SGL 0x3 338#define TW_ALIGNMENT_9000_SGL 0x3
@@ -417,6 +416,9 @@ static twa_message_type twa_error_table[] = {
417#ifndef PCI_DEVICE_ID_3WARE_9000 416#ifndef PCI_DEVICE_ID_3WARE_9000
418#define PCI_DEVICE_ID_3WARE_9000 0x1002 417#define PCI_DEVICE_ID_3WARE_9000 0x1002
419#endif 418#endif
419#ifndef PCI_DEVICE_ID_3WARE_9550SX
420#define PCI_DEVICE_ID_3WARE_9550SX 0x1003
421#endif
420 422
421/* Bitmask macros to eliminate bitfields */ 423/* Bitmask macros to eliminate bitfields */
422 424
@@ -443,6 +445,7 @@ static twa_message_type twa_error_table[] = {
443#define TW_STATUS_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + 0x4) 445#define TW_STATUS_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + 0x4)
444#define TW_COMMAND_QUEUE_REG_ADDR(x) (sizeof(dma_addr_t) > 4 ? ((unsigned char __iomem *)x->base_addr + 0x20) : ((unsigned char __iomem *)x->base_addr + 0x8)) 446#define TW_COMMAND_QUEUE_REG_ADDR(x) (sizeof(dma_addr_t) > 4 ? ((unsigned char __iomem *)x->base_addr + 0x20) : ((unsigned char __iomem *)x->base_addr + 0x8))
445#define TW_RESPONSE_QUEUE_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + 0xC) 447#define TW_RESPONSE_QUEUE_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + 0xC)
448#define TW_RESPONSE_QUEUE_REG_ADDR_LARGE(x) ((unsigned char __iomem *)x->base_addr + 0x30)
446#define TW_CLEAR_ALL_INTERRUPTS(x) (writel(TW_STATUS_VALID_INTERRUPT, TW_CONTROL_REG_ADDR(x))) 449#define TW_CLEAR_ALL_INTERRUPTS(x) (writel(TW_STATUS_VALID_INTERRUPT, TW_CONTROL_REG_ADDR(x)))
447#define TW_CLEAR_ATTENTION_INTERRUPT(x) (writel(TW_CONTROL_CLEAR_ATTENTION_INTERRUPT, TW_CONTROL_REG_ADDR(x))) 450#define TW_CLEAR_ATTENTION_INTERRUPT(x) (writel(TW_CONTROL_CLEAR_ATTENTION_INTERRUPT, TW_CONTROL_REG_ADDR(x)))
448#define TW_CLEAR_HOST_INTERRUPT(x) (writel(TW_CONTROL_CLEAR_HOST_INTERRUPT, TW_CONTROL_REG_ADDR(x))) 451#define TW_CLEAR_HOST_INTERRUPT(x) (writel(TW_CONTROL_CLEAR_HOST_INTERRUPT, TW_CONTROL_REG_ADDR(x)))
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 1e4edbdf2730..48529d180ca8 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -99,6 +99,7 @@ obj-$(CONFIG_SCSI_DC395x) += dc395x.o
99obj-$(CONFIG_SCSI_DC390T) += tmscsim.o 99obj-$(CONFIG_SCSI_DC390T) += tmscsim.o
100obj-$(CONFIG_MEGARAID_LEGACY) += megaraid.o 100obj-$(CONFIG_MEGARAID_LEGACY) += megaraid.o
101obj-$(CONFIG_MEGARAID_NEWGEN) += megaraid/ 101obj-$(CONFIG_MEGARAID_NEWGEN) += megaraid/
102obj-$(CONFIG_MEGARAID_SAS) += megaraid/
102obj-$(CONFIG_SCSI_ACARD) += atp870u.o 103obj-$(CONFIG_SCSI_ACARD) += atp870u.o
103obj-$(CONFIG_SCSI_SUNESP) += esp.o 104obj-$(CONFIG_SCSI_SUNESP) += esp.o
104obj-$(CONFIG_SCSI_GDTH) += gdth.o 105obj-$(CONFIG_SCSI_GDTH) += gdth.o
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index a8e3dfcd0dc7..93416f760e5a 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -313,18 +313,37 @@ int aac_get_containers(struct aac_dev *dev)
313 } 313 }
314 dresp = (struct aac_mount *)fib_data(fibptr); 314 dresp = (struct aac_mount *)fib_data(fibptr);
315 315
316 if ((le32_to_cpu(dresp->status) == ST_OK) &&
317 (le32_to_cpu(dresp->mnt[0].vol) == CT_NONE)) {
318 dinfo->command = cpu_to_le32(VM_NameServe64);
319 dinfo->count = cpu_to_le32(index);
320 dinfo->type = cpu_to_le32(FT_FILESYS);
321
322 if (fib_send(ContainerCommand,
323 fibptr,
324 sizeof(struct aac_query_mount),
325 FsaNormal,
326 1, 1,
327 NULL, NULL) < 0)
328 continue;
329 } else
330 dresp->mnt[0].capacityhigh = 0;
331
316 dprintk ((KERN_DEBUG 332 dprintk ((KERN_DEBUG
317 "VM_NameServe cid=%d status=%d vol=%d state=%d cap=%u\n", 333 "VM_NameServe cid=%d status=%d vol=%d state=%d cap=%llu\n",
318 (int)index, (int)le32_to_cpu(dresp->status), 334 (int)index, (int)le32_to_cpu(dresp->status),
319 (int)le32_to_cpu(dresp->mnt[0].vol), 335 (int)le32_to_cpu(dresp->mnt[0].vol),
320 (int)le32_to_cpu(dresp->mnt[0].state), 336 (int)le32_to_cpu(dresp->mnt[0].state),
321 (unsigned)le32_to_cpu(dresp->mnt[0].capacity))); 337 ((u64)le32_to_cpu(dresp->mnt[0].capacity)) +
338 (((u64)le32_to_cpu(dresp->mnt[0].capacityhigh)) << 32)));
322 if ((le32_to_cpu(dresp->status) == ST_OK) && 339 if ((le32_to_cpu(dresp->status) == ST_OK) &&
323 (le32_to_cpu(dresp->mnt[0].vol) != CT_NONE) && 340 (le32_to_cpu(dresp->mnt[0].vol) != CT_NONE) &&
324 (le32_to_cpu(dresp->mnt[0].state) != FSCS_HIDDEN)) { 341 (le32_to_cpu(dresp->mnt[0].state) != FSCS_HIDDEN)) {
325 fsa_dev_ptr[index].valid = 1; 342 fsa_dev_ptr[index].valid = 1;
326 fsa_dev_ptr[index].type = le32_to_cpu(dresp->mnt[0].vol); 343 fsa_dev_ptr[index].type = le32_to_cpu(dresp->mnt[0].vol);
327 fsa_dev_ptr[index].size = le32_to_cpu(dresp->mnt[0].capacity); 344 fsa_dev_ptr[index].size
345 = ((u64)le32_to_cpu(dresp->mnt[0].capacity)) +
346 (((u64)le32_to_cpu(dresp->mnt[0].capacityhigh)) << 32);
328 if (le32_to_cpu(dresp->mnt[0].state) & FSCS_READONLY) 347 if (le32_to_cpu(dresp->mnt[0].state) & FSCS_READONLY)
329 fsa_dev_ptr[index].ro = 1; 348 fsa_dev_ptr[index].ro = 1;
330 } 349 }
@@ -460,7 +479,7 @@ static int aac_get_container_name(struct scsi_cmnd * scsicmd, int cid)
460 * is updated in the struct fsa_dev_info structure rather than returned. 479 * is updated in the struct fsa_dev_info structure rather than returned.
461 */ 480 */
462 481
463static int probe_container(struct aac_dev *dev, int cid) 482int probe_container(struct aac_dev *dev, int cid)
464{ 483{
465 struct fsa_dev_info *fsa_dev_ptr; 484 struct fsa_dev_info *fsa_dev_ptr;
466 int status; 485 int status;
@@ -497,11 +516,29 @@ static int probe_container(struct aac_dev *dev, int cid)
497 dresp = (struct aac_mount *) fib_data(fibptr); 516 dresp = (struct aac_mount *) fib_data(fibptr);
498 517
499 if ((le32_to_cpu(dresp->status) == ST_OK) && 518 if ((le32_to_cpu(dresp->status) == ST_OK) &&
519 (le32_to_cpu(dresp->mnt[0].vol) == CT_NONE)) {
520 dinfo->command = cpu_to_le32(VM_NameServe64);
521 dinfo->count = cpu_to_le32(cid);
522 dinfo->type = cpu_to_le32(FT_FILESYS);
523
524 if (fib_send(ContainerCommand,
525 fibptr,
526 sizeof(struct aac_query_mount),
527 FsaNormal,
528 1, 1,
529 NULL, NULL) < 0)
530 goto error;
531 } else
532 dresp->mnt[0].capacityhigh = 0;
533
534 if ((le32_to_cpu(dresp->status) == ST_OK) &&
500 (le32_to_cpu(dresp->mnt[0].vol) != CT_NONE) && 535 (le32_to_cpu(dresp->mnt[0].vol) != CT_NONE) &&
501 (le32_to_cpu(dresp->mnt[0].state) != FSCS_HIDDEN)) { 536 (le32_to_cpu(dresp->mnt[0].state) != FSCS_HIDDEN)) {
502 fsa_dev_ptr[cid].valid = 1; 537 fsa_dev_ptr[cid].valid = 1;
503 fsa_dev_ptr[cid].type = le32_to_cpu(dresp->mnt[0].vol); 538 fsa_dev_ptr[cid].type = le32_to_cpu(dresp->mnt[0].vol);
504 fsa_dev_ptr[cid].size = le32_to_cpu(dresp->mnt[0].capacity); 539 fsa_dev_ptr[cid].size
540 = ((u64)le32_to_cpu(dresp->mnt[0].capacity)) +
541 (((u64)le32_to_cpu(dresp->mnt[0].capacityhigh)) << 32);
505 if (le32_to_cpu(dresp->mnt[0].state) & FSCS_READONLY) 542 if (le32_to_cpu(dresp->mnt[0].state) & FSCS_READONLY)
506 fsa_dev_ptr[cid].ro = 1; 543 fsa_dev_ptr[cid].ro = 1;
507 } 544 }
@@ -655,7 +692,7 @@ int aac_get_adapter_info(struct aac_dev* dev)
655 fibptr, 692 fibptr,
656 sizeof(*info), 693 sizeof(*info),
657 FsaNormal, 694 FsaNormal,
658 1, 1, 695 -1, 1, /* First `interrupt' command uses special wait */
659 NULL, 696 NULL,
660 NULL); 697 NULL);
661 698
@@ -806,8 +843,8 @@ int aac_get_adapter_info(struct aac_dev* dev)
806 if (!(dev->raw_io_interface)) { 843 if (!(dev->raw_io_interface)) {
807 dev->scsi_host_ptr->sg_tablesize = (dev->max_fib_size - 844 dev->scsi_host_ptr->sg_tablesize = (dev->max_fib_size -
808 sizeof(struct aac_fibhdr) - 845 sizeof(struct aac_fibhdr) -
809 sizeof(struct aac_write) + sizeof(struct sgmap)) / 846 sizeof(struct aac_write) + sizeof(struct sgentry)) /
810 sizeof(struct sgmap); 847 sizeof(struct sgentry);
811 if (dev->dac_support) { 848 if (dev->dac_support) {
812 /* 849 /*
813 * 38 scatter gather elements 850 * 38 scatter gather elements
@@ -816,8 +853,8 @@ int aac_get_adapter_info(struct aac_dev* dev)
816 (dev->max_fib_size - 853 (dev->max_fib_size -
817 sizeof(struct aac_fibhdr) - 854 sizeof(struct aac_fibhdr) -
818 sizeof(struct aac_write64) + 855 sizeof(struct aac_write64) +
819 sizeof(struct sgmap64)) / 856 sizeof(struct sgentry64)) /
820 sizeof(struct sgmap64); 857 sizeof(struct sgentry64);
821 } 858 }
822 dev->scsi_host_ptr->max_sectors = AAC_MAX_32BIT_SGBCOUNT; 859 dev->scsi_host_ptr->max_sectors = AAC_MAX_32BIT_SGBCOUNT;
823 if(!(dev->adapter_info.options & AAC_OPT_NEW_COMM)) { 860 if(!(dev->adapter_info.options & AAC_OPT_NEW_COMM)) {
@@ -854,7 +891,40 @@ static void io_callback(void *context, struct fib * fibptr)
854 dev = (struct aac_dev *)scsicmd->device->host->hostdata; 891 dev = (struct aac_dev *)scsicmd->device->host->hostdata;
855 cid = ID_LUN_TO_CONTAINER(scsicmd->device->id, scsicmd->device->lun); 892 cid = ID_LUN_TO_CONTAINER(scsicmd->device->id, scsicmd->device->lun);
856 893
857 dprintk((KERN_DEBUG "io_callback[cpu %d]: lba = %u, t = %ld.\n", smp_processor_id(), ((scsicmd->cmnd[1] & 0x1F) << 16) | (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3], jiffies)); 894 if (nblank(dprintk(x))) {
895 u64 lba;
896 switch (scsicmd->cmnd[0]) {
897 case WRITE_6:
898 case READ_6:
899 lba = ((scsicmd->cmnd[1] & 0x1F) << 16) |
900 (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3];
901 break;
902 case WRITE_16:
903 case READ_16:
904 lba = ((u64)scsicmd->cmnd[2] << 56) |
905 ((u64)scsicmd->cmnd[3] << 48) |
906 ((u64)scsicmd->cmnd[4] << 40) |
907 ((u64)scsicmd->cmnd[5] << 32) |
908 ((u64)scsicmd->cmnd[6] << 24) |
909 (scsicmd->cmnd[7] << 16) |
910 (scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9];
911 break;
912 case WRITE_12:
913 case READ_12:
914 lba = ((u64)scsicmd->cmnd[2] << 24) |
915 (scsicmd->cmnd[3] << 16) |
916 (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
917 break;
918 default:
919 lba = ((u64)scsicmd->cmnd[2] << 24) |
920 (scsicmd->cmnd[3] << 16) |
921 (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
922 break;
923 }
924 printk(KERN_DEBUG
925 "io_callback[cpu %d]: lba = %llu, t = %ld.\n",
926 smp_processor_id(), (unsigned long long)lba, jiffies);
927 }
858 928
859 if (fibptr == NULL) 929 if (fibptr == NULL)
860 BUG(); 930 BUG();
@@ -895,7 +965,7 @@ static void io_callback(void *context, struct fib * fibptr)
895 965
896static int aac_read(struct scsi_cmnd * scsicmd, int cid) 966static int aac_read(struct scsi_cmnd * scsicmd, int cid)
897{ 967{
898 u32 lba; 968 u64 lba;
899 u32 count; 969 u32 count;
900 int status; 970 int status;
901 971
@@ -907,23 +977,69 @@ static int aac_read(struct scsi_cmnd * scsicmd, int cid)
907 /* 977 /*
908 * Get block address and transfer length 978 * Get block address and transfer length
909 */ 979 */
910 if (scsicmd->cmnd[0] == READ_6) /* 6 byte command */ 980 switch (scsicmd->cmnd[0]) {
911 { 981 case READ_6:
912 dprintk((KERN_DEBUG "aachba: received a read(6) command on id %d.\n", cid)); 982 dprintk((KERN_DEBUG "aachba: received a read(6) command on id %d.\n", cid));
913 983
914 lba = ((scsicmd->cmnd[1] & 0x1F) << 16) | (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3]; 984 lba = ((scsicmd->cmnd[1] & 0x1F) << 16) |
985 (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3];
915 count = scsicmd->cmnd[4]; 986 count = scsicmd->cmnd[4];
916 987
917 if (count == 0) 988 if (count == 0)
918 count = 256; 989 count = 256;
919 } else { 990 break;
991 case READ_16:
992 dprintk((KERN_DEBUG "aachba: received a read(16) command on id %d.\n", cid));
993
994 lba = ((u64)scsicmd->cmnd[2] << 56) |
995 ((u64)scsicmd->cmnd[3] << 48) |
996 ((u64)scsicmd->cmnd[4] << 40) |
997 ((u64)scsicmd->cmnd[5] << 32) |
998 ((u64)scsicmd->cmnd[6] << 24) |
999 (scsicmd->cmnd[7] << 16) |
1000 (scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9];
1001 count = (scsicmd->cmnd[10] << 24) |
1002 (scsicmd->cmnd[11] << 16) |
1003 (scsicmd->cmnd[12] << 8) | scsicmd->cmnd[13];
1004 break;
1005 case READ_12:
1006 dprintk((KERN_DEBUG "aachba: received a read(12) command on id %d.\n", cid));
1007
1008 lba = ((u64)scsicmd->cmnd[2] << 24) |
1009 (scsicmd->cmnd[3] << 16) |
1010 (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
1011 count = (scsicmd->cmnd[6] << 24) |
1012 (scsicmd->cmnd[7] << 16) |
1013 (scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9];
1014 break;
1015 default:
920 dprintk((KERN_DEBUG "aachba: received a read(10) command on id %d.\n", cid)); 1016 dprintk((KERN_DEBUG "aachba: received a read(10) command on id %d.\n", cid));
921 1017
922 lba = (scsicmd->cmnd[2] << 24) | (scsicmd->cmnd[3] << 16) | (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5]; 1018 lba = ((u64)scsicmd->cmnd[2] << 24) |
1019 (scsicmd->cmnd[3] << 16) |
1020 (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
923 count = (scsicmd->cmnd[7] << 8) | scsicmd->cmnd[8]; 1021 count = (scsicmd->cmnd[7] << 8) | scsicmd->cmnd[8];
1022 break;
924 } 1023 }
925 dprintk((KERN_DEBUG "aac_read[cpu %d]: lba = %u, t = %ld.\n", 1024 dprintk((KERN_DEBUG "aac_read[cpu %d]: lba = %llu, t = %ld.\n",
926 smp_processor_id(), (unsigned long long)lba, jiffies)); 1025 smp_processor_id(), (unsigned long long)lba, jiffies));
1026 if ((!(dev->raw_io_interface) || !(dev->raw_io_64)) &&
1027 (lba & 0xffffffff00000000LL)) {
1028 dprintk((KERN_DEBUG "aac_read: Illegal lba\n"));
1029 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
1030 SAM_STAT_CHECK_CONDITION;
1031 set_sense((u8 *) &dev->fsa_dev[cid].sense_data,
1032 HARDWARE_ERROR,
1033 SENCODE_INTERNAL_TARGET_FAILURE,
1034 ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0,
1035 0, 0);
1036 memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
1037 (sizeof(dev->fsa_dev[cid].sense_data) > sizeof(scsicmd->sense_buffer))
1038 ? sizeof(scsicmd->sense_buffer)
1039 : sizeof(dev->fsa_dev[cid].sense_data));
1040 scsicmd->scsi_done(scsicmd);
1041 return 0;
1042 }
927 /* 1043 /*
 928	 * Allocate and initialize a Fib 1044
929 */ 1045 */
@@ -936,8 +1052,8 @@ static int aac_read(struct scsi_cmnd * scsicmd, int cid)
936 if (dev->raw_io_interface) { 1052 if (dev->raw_io_interface) {
937 struct aac_raw_io *readcmd; 1053 struct aac_raw_io *readcmd;
938 readcmd = (struct aac_raw_io *) fib_data(cmd_fibcontext); 1054 readcmd = (struct aac_raw_io *) fib_data(cmd_fibcontext);
939 readcmd->block[0] = cpu_to_le32(lba); 1055 readcmd->block[0] = cpu_to_le32((u32)(lba&0xffffffff));
940 readcmd->block[1] = 0; 1056 readcmd->block[1] = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32));
941 readcmd->count = cpu_to_le32(count<<9); 1057 readcmd->count = cpu_to_le32(count<<9);
942 readcmd->cid = cpu_to_le16(cid); 1058 readcmd->cid = cpu_to_le16(cid);
943 readcmd->flags = cpu_to_le16(1); 1059 readcmd->flags = cpu_to_le16(1);
@@ -964,7 +1080,7 @@ static int aac_read(struct scsi_cmnd * scsicmd, int cid)
964 readcmd->command = cpu_to_le32(VM_CtHostRead64); 1080 readcmd->command = cpu_to_le32(VM_CtHostRead64);
965 readcmd->cid = cpu_to_le16(cid); 1081 readcmd->cid = cpu_to_le16(cid);
966 readcmd->sector_count = cpu_to_le16(count); 1082 readcmd->sector_count = cpu_to_le16(count);
967 readcmd->block = cpu_to_le32(lba); 1083 readcmd->block = cpu_to_le32((u32)(lba&0xffffffff));
968 readcmd->pad = 0; 1084 readcmd->pad = 0;
969 readcmd->flags = 0; 1085 readcmd->flags = 0;
970 1086
@@ -989,7 +1105,7 @@ static int aac_read(struct scsi_cmnd * scsicmd, int cid)
989 readcmd = (struct aac_read *) fib_data(cmd_fibcontext); 1105 readcmd = (struct aac_read *) fib_data(cmd_fibcontext);
990 readcmd->command = cpu_to_le32(VM_CtBlockRead); 1106 readcmd->command = cpu_to_le32(VM_CtBlockRead);
991 readcmd->cid = cpu_to_le32(cid); 1107 readcmd->cid = cpu_to_le32(cid);
992 readcmd->block = cpu_to_le32(lba); 1108 readcmd->block = cpu_to_le32((u32)(lba&0xffffffff));
993 readcmd->count = cpu_to_le32(count * 512); 1109 readcmd->count = cpu_to_le32(count * 512);
994 1110
995 aac_build_sg(scsicmd, &readcmd->sg); 1111 aac_build_sg(scsicmd, &readcmd->sg);
@@ -1031,7 +1147,7 @@ static int aac_read(struct scsi_cmnd * scsicmd, int cid)
1031 1147
1032static int aac_write(struct scsi_cmnd * scsicmd, int cid) 1148static int aac_write(struct scsi_cmnd * scsicmd, int cid)
1033{ 1149{
1034 u32 lba; 1150 u64 lba;
1035 u32 count; 1151 u32 count;
1036 int status; 1152 int status;
1037 u16 fibsize; 1153 u16 fibsize;
@@ -1048,13 +1164,48 @@ static int aac_write(struct scsi_cmnd * scsicmd, int cid)
1048 count = scsicmd->cmnd[4]; 1164 count = scsicmd->cmnd[4];
1049 if (count == 0) 1165 if (count == 0)
1050 count = 256; 1166 count = 256;
1167 } else if (scsicmd->cmnd[0] == WRITE_16) { /* 16 byte command */
1168 dprintk((KERN_DEBUG "aachba: received a write(16) command on id %d.\n", cid));
1169
1170 lba = ((u64)scsicmd->cmnd[2] << 56) |
1171 ((u64)scsicmd->cmnd[3] << 48) |
1172 ((u64)scsicmd->cmnd[4] << 40) |
1173 ((u64)scsicmd->cmnd[5] << 32) |
1174 ((u64)scsicmd->cmnd[6] << 24) |
1175 (scsicmd->cmnd[7] << 16) |
1176 (scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9];
1177 count = (scsicmd->cmnd[10] << 24) | (scsicmd->cmnd[11] << 16) |
1178 (scsicmd->cmnd[12] << 8) | scsicmd->cmnd[13];
1179 } else if (scsicmd->cmnd[0] == WRITE_12) { /* 12 byte command */
1180 dprintk((KERN_DEBUG "aachba: received a write(12) command on id %d.\n", cid));
1181
1182 lba = ((u64)scsicmd->cmnd[2] << 24) | (scsicmd->cmnd[3] << 16)
1183 | (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
1184 count = (scsicmd->cmnd[6] << 24) | (scsicmd->cmnd[7] << 16)
1185 | (scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9];
1051 } else { 1186 } else {
1052 dprintk((KERN_DEBUG "aachba: received a write(10) command on id %d.\n", cid)); 1187 dprintk((KERN_DEBUG "aachba: received a write(10) command on id %d.\n", cid));
1053 lba = (scsicmd->cmnd[2] << 24) | (scsicmd->cmnd[3] << 16) | (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5]; 1188 lba = ((u64)scsicmd->cmnd[2] << 24) | (scsicmd->cmnd[3] << 16) | (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
1054 count = (scsicmd->cmnd[7] << 8) | scsicmd->cmnd[8]; 1189 count = (scsicmd->cmnd[7] << 8) | scsicmd->cmnd[8];
1055 } 1190 }
1056 dprintk((KERN_DEBUG "aac_write[cpu %d]: lba = %u, t = %ld.\n", 1191 dprintk((KERN_DEBUG "aac_write[cpu %d]: lba = %llu, t = %ld.\n",
1057 smp_processor_id(), (unsigned long long)lba, jiffies)); 1192 smp_processor_id(), (unsigned long long)lba, jiffies));
1193 if ((!(dev->raw_io_interface) || !(dev->raw_io_64))
1194 && (lba & 0xffffffff00000000LL)) {
1195 dprintk((KERN_DEBUG "aac_write: Illegal lba\n"));
1196 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION;
1197 set_sense((u8 *) &dev->fsa_dev[cid].sense_data,
1198 HARDWARE_ERROR,
1199 SENCODE_INTERNAL_TARGET_FAILURE,
1200 ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0,
1201 0, 0);
1202 memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
1203 (sizeof(dev->fsa_dev[cid].sense_data) > sizeof(scsicmd->sense_buffer))
1204 ? sizeof(scsicmd->sense_buffer)
1205 : sizeof(dev->fsa_dev[cid].sense_data));
1206 scsicmd->scsi_done(scsicmd);
1207 return 0;
1208 }
1058 /* 1209 /*
1059 * Allocate and initialize a Fib then setup a BlockWrite command 1210 * Allocate and initialize a Fib then setup a BlockWrite command
1060 */ 1211 */
@@ -1068,8 +1219,8 @@ static int aac_write(struct scsi_cmnd * scsicmd, int cid)
1068 if (dev->raw_io_interface) { 1219 if (dev->raw_io_interface) {
1069 struct aac_raw_io *writecmd; 1220 struct aac_raw_io *writecmd;
1070 writecmd = (struct aac_raw_io *) fib_data(cmd_fibcontext); 1221 writecmd = (struct aac_raw_io *) fib_data(cmd_fibcontext);
1071 writecmd->block[0] = cpu_to_le32(lba); 1222 writecmd->block[0] = cpu_to_le32((u32)(lba&0xffffffff));
1072 writecmd->block[1] = 0; 1223 writecmd->block[1] = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32));
1073 writecmd->count = cpu_to_le32(count<<9); 1224 writecmd->count = cpu_to_le32(count<<9);
1074 writecmd->cid = cpu_to_le16(cid); 1225 writecmd->cid = cpu_to_le16(cid);
1075 writecmd->flags = 0; 1226 writecmd->flags = 0;
@@ -1096,7 +1247,7 @@ static int aac_write(struct scsi_cmnd * scsicmd, int cid)
1096 writecmd->command = cpu_to_le32(VM_CtHostWrite64); 1247 writecmd->command = cpu_to_le32(VM_CtHostWrite64);
1097 writecmd->cid = cpu_to_le16(cid); 1248 writecmd->cid = cpu_to_le16(cid);
1098 writecmd->sector_count = cpu_to_le16(count); 1249 writecmd->sector_count = cpu_to_le16(count);
1099 writecmd->block = cpu_to_le32(lba); 1250 writecmd->block = cpu_to_le32((u32)(lba&0xffffffff));
1100 writecmd->pad = 0; 1251 writecmd->pad = 0;
1101 writecmd->flags = 0; 1252 writecmd->flags = 0;
1102 1253
@@ -1121,7 +1272,7 @@ static int aac_write(struct scsi_cmnd * scsicmd, int cid)
1121 writecmd = (struct aac_write *) fib_data(cmd_fibcontext); 1272 writecmd = (struct aac_write *) fib_data(cmd_fibcontext);
1122 writecmd->command = cpu_to_le32(VM_CtBlockWrite); 1273 writecmd->command = cpu_to_le32(VM_CtBlockWrite);
1123 writecmd->cid = cpu_to_le32(cid); 1274 writecmd->cid = cpu_to_le32(cid);
1124 writecmd->block = cpu_to_le32(lba); 1275 writecmd->block = cpu_to_le32((u32)(lba&0xffffffff));
1125 writecmd->count = cpu_to_le32(count * 512); 1276 writecmd->count = cpu_to_le32(count * 512);
1126 writecmd->sg.count = cpu_to_le32(1); 1277 writecmd->sg.count = cpu_to_le32(1);
1127 /* ->stable is not used - it did mean which type of write */ 1278 /* ->stable is not used - it did mean which type of write */
@@ -1310,11 +1461,18 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
1310 */ 1461 */
1311 if ((fsa_dev_ptr[cid].valid & 1) == 0) { 1462 if ((fsa_dev_ptr[cid].valid & 1) == 0) {
1312 switch (scsicmd->cmnd[0]) { 1463 switch (scsicmd->cmnd[0]) {
1464 case SERVICE_ACTION_IN:
1465 if (!(dev->raw_io_interface) ||
1466 !(dev->raw_io_64) ||
1467 ((scsicmd->cmnd[1] & 0x1f) != SAI_READ_CAPACITY_16))
1468 break;
1313 case INQUIRY: 1469 case INQUIRY:
1314 case READ_CAPACITY: 1470 case READ_CAPACITY:
1315 case TEST_UNIT_READY: 1471 case TEST_UNIT_READY:
1316 spin_unlock_irq(host->host_lock); 1472 spin_unlock_irq(host->host_lock);
1317 probe_container(dev, cid); 1473 probe_container(dev, cid);
1474 if ((fsa_dev_ptr[cid].valid & 1) == 0)
1475 fsa_dev_ptr[cid].valid = 0;
1318 spin_lock_irq(host->host_lock); 1476 spin_lock_irq(host->host_lock);
1319 if (fsa_dev_ptr[cid].valid == 0) { 1477 if (fsa_dev_ptr[cid].valid == 0) {
1320 scsicmd->result = DID_NO_CONNECT << 16; 1478 scsicmd->result = DID_NO_CONNECT << 16;
@@ -1375,7 +1533,6 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
1375 memset(&inq_data, 0, sizeof (struct inquiry_data)); 1533 memset(&inq_data, 0, sizeof (struct inquiry_data));
1376 1534
1377 inq_data.inqd_ver = 2; /* claim compliance to SCSI-2 */ 1535 inq_data.inqd_ver = 2; /* claim compliance to SCSI-2 */
1378 inq_data.inqd_dtq = 0x80; /* set RMB bit to one indicating that the medium is removable */
1379 inq_data.inqd_rdf = 2; /* A response data format value of two indicates that the data shall be in the format specified in SCSI-2 */ 1536 inq_data.inqd_rdf = 2; /* A response data format value of two indicates that the data shall be in the format specified in SCSI-2 */
1380 inq_data.inqd_len = 31; 1537 inq_data.inqd_len = 31;
1381 /*Format for "pad2" is RelAdr | WBus32 | WBus16 | Sync | Linked |Reserved| CmdQue | SftRe */ 1538 /*Format for "pad2" is RelAdr | WBus32 | WBus16 | Sync | Linked |Reserved| CmdQue | SftRe */
@@ -1397,13 +1554,55 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
1397 aac_internal_transfer(scsicmd, &inq_data, 0, sizeof(inq_data)); 1554 aac_internal_transfer(scsicmd, &inq_data, 0, sizeof(inq_data));
1398 return aac_get_container_name(scsicmd, cid); 1555 return aac_get_container_name(scsicmd, cid);
1399 } 1556 }
1557 case SERVICE_ACTION_IN:
1558 if (!(dev->raw_io_interface) ||
1559 !(dev->raw_io_64) ||
1560 ((scsicmd->cmnd[1] & 0x1f) != SAI_READ_CAPACITY_16))
1561 break;
1562 {
1563 u64 capacity;
1564 char cp[12];
1565 unsigned int offset = 0;
1566
1567 dprintk((KERN_DEBUG "READ CAPACITY_16 command.\n"));
1568 capacity = fsa_dev_ptr[cid].size - 1;
1569 if (scsicmd->cmnd[13] > 12) {
1570 offset = scsicmd->cmnd[13] - 12;
1571 if (offset > sizeof(cp))
1572 break;
1573 memset(cp, 0, offset);
1574 aac_internal_transfer(scsicmd, cp, 0, offset);
1575 }
1576 cp[0] = (capacity >> 56) & 0xff;
1577 cp[1] = (capacity >> 48) & 0xff;
1578 cp[2] = (capacity >> 40) & 0xff;
1579 cp[3] = (capacity >> 32) & 0xff;
1580 cp[4] = (capacity >> 24) & 0xff;
1581 cp[5] = (capacity >> 16) & 0xff;
1582 cp[6] = (capacity >> 8) & 0xff;
1583 cp[7] = (capacity >> 0) & 0xff;
1584 cp[8] = 0;
1585 cp[9] = 0;
1586 cp[10] = 2;
1587 cp[11] = 0;
1588 aac_internal_transfer(scsicmd, cp, offset, sizeof(cp));
1589
1590 /* Do not cache partition table for arrays */
1591 scsicmd->device->removable = 1;
1592
1593 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
1594 scsicmd->scsi_done(scsicmd);
1595
1596 return 0;
1597 }
1598
1400 case READ_CAPACITY: 1599 case READ_CAPACITY:
1401 { 1600 {
1402 u32 capacity; 1601 u32 capacity;
1403 char cp[8]; 1602 char cp[8];
1404 1603
1405 dprintk((KERN_DEBUG "READ CAPACITY command.\n")); 1604 dprintk((KERN_DEBUG "READ CAPACITY command.\n"));
1406 if (fsa_dev_ptr[cid].size <= 0x100000000LL) 1605 if (fsa_dev_ptr[cid].size <= 0x100000000ULL)
1407 capacity = fsa_dev_ptr[cid].size - 1; 1606 capacity = fsa_dev_ptr[cid].size - 1;
1408 else 1607 else
1409 capacity = (u32)-1; 1608 capacity = (u32)-1;
@@ -1417,6 +1616,8 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
1417 cp[6] = 2; 1616 cp[6] = 2;
1418 cp[7] = 0; 1617 cp[7] = 0;
1419 aac_internal_transfer(scsicmd, cp, 0, sizeof(cp)); 1618 aac_internal_transfer(scsicmd, cp, 0, sizeof(cp));
1619 /* Do not cache partition table for arrays */
1620 scsicmd->device->removable = 1;
1420 1621
1421 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD; 1622 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
1422 scsicmd->scsi_done(scsicmd); 1623 scsicmd->scsi_done(scsicmd);
@@ -1497,6 +1698,8 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
1497 { 1698 {
1498 case READ_6: 1699 case READ_6:
1499 case READ_10: 1700 case READ_10:
1701 case READ_12:
1702 case READ_16:
1500 /* 1703 /*
1501 * Hack to keep track of ordinal number of the device that 1704 * Hack to keep track of ordinal number of the device that
1502 * corresponds to a container. Needed to convert 1705 * corresponds to a container. Needed to convert
@@ -1504,17 +1707,19 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
1504 */ 1707 */
1505 1708
1506 spin_unlock_irq(host->host_lock); 1709 spin_unlock_irq(host->host_lock);
1507 if (scsicmd->request->rq_disk) 1710 if (scsicmd->request->rq_disk)
1508 memcpy(fsa_dev_ptr[cid].devname, 1711 strlcpy(fsa_dev_ptr[cid].devname,
1509 scsicmd->request->rq_disk->disk_name, 1712 scsicmd->request->rq_disk->disk_name,
1510 8); 1713 min(sizeof(fsa_dev_ptr[cid].devname),
1511 1714 sizeof(scsicmd->request->rq_disk->disk_name) + 1));
1512 ret = aac_read(scsicmd, cid); 1715 ret = aac_read(scsicmd, cid);
1513 spin_lock_irq(host->host_lock); 1716 spin_lock_irq(host->host_lock);
1514 return ret; 1717 return ret;
1515 1718
1516 case WRITE_6: 1719 case WRITE_6:
1517 case WRITE_10: 1720 case WRITE_10:
1721 case WRITE_12:
1722 case WRITE_16:
1518 spin_unlock_irq(host->host_lock); 1723 spin_unlock_irq(host->host_lock);
1519 ret = aac_write(scsicmd, cid); 1724 ret = aac_write(scsicmd, cid);
1520 spin_lock_irq(host->host_lock); 1725 spin_lock_irq(host->host_lock);
@@ -1745,6 +1950,8 @@ static void aac_srb_callback(void *context, struct fib * fibptr)
1745 case WRITE_10: 1950 case WRITE_10:
1746 case READ_12: 1951 case READ_12:
1747 case WRITE_12: 1952 case WRITE_12:
1953 case READ_16:
1954 case WRITE_16:
1748 if(le32_to_cpu(srbreply->data_xfer_length) < scsicmd->underflow ) { 1955 if(le32_to_cpu(srbreply->data_xfer_length) < scsicmd->underflow ) {
1749 printk(KERN_WARNING"aacraid: SCSI CMD underflow\n"); 1956 printk(KERN_WARNING"aacraid: SCSI CMD underflow\n");
1750 } else { 1957 } else {
@@ -1850,8 +2057,8 @@ static void aac_srb_callback(void *context, struct fib * fibptr)
1850 sizeof(scsicmd->sense_buffer) : 2057 sizeof(scsicmd->sense_buffer) :
1851 le32_to_cpu(srbreply->sense_data_size); 2058 le32_to_cpu(srbreply->sense_data_size);
1852#ifdef AAC_DETAILED_STATUS_INFO 2059#ifdef AAC_DETAILED_STATUS_INFO
1853 dprintk((KERN_WARNING "aac_srb_callback: check condition, status = %d len=%d\n", 2060 printk(KERN_WARNING "aac_srb_callback: check condition, status = %d len=%d\n",
1854 le32_to_cpu(srbreply->status), len)); 2061 le32_to_cpu(srbreply->status), len);
1855#endif 2062#endif
1856 memcpy(scsicmd->sense_buffer, srbreply->sense_data, len); 2063 memcpy(scsicmd->sense_buffer, srbreply->sense_data, len);
1857 2064
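The aachba.c changes above teach aac_read() and aac_write() the 12- and 16-byte CDBs: the LBA is widened to u64, assembled from the big-endian CDB bytes, rejected with a check condition when the firmware cannot address above 4 GB, and split into two little-endian words for the raw-io FIB. The two conversions, as a sketch with illustrative function names:

#include <linux/types.h>
#include <asm/byteorder.h>

/* Sketch: big-endian 64-bit LBA from bytes 2..9 of a READ(16)/WRITE(16) CDB. */
static u64 cdb16_to_lba(const u8 *cmnd)
{
	return ((u64)cmnd[2] << 56) | ((u64)cmnd[3] << 48) |
	       ((u64)cmnd[4] << 40) | ((u64)cmnd[5] << 32) |
	       ((u64)cmnd[6] << 24) | ((u64)cmnd[7] << 16) |
	       ((u64)cmnd[8] << 8)  |  (u64)cmnd[9];
}

/* Sketch: split the LBA into the two little-endian words of the raw-io block[]. */
static void lba_to_raw_io_block(u64 lba, __le32 block[2])
{
	block[0] = cpu_to_le32((u32)(lba & 0xffffffff));	/* low word  */
	block[1] = cpu_to_le32((u32)(lba >> 32));		/* high word */
}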
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index e40528185d48..4a99d2f000f4 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -1,6 +1,10 @@
1#if (!defined(dprintk)) 1#if (!defined(dprintk))
2# define dprintk(x) 2# define dprintk(x)
3#endif 3#endif
4/* eg: if (nblank(dprintk(x))) */
5#define _nblank(x) #x
6#define nblank(x) _nblank(x)[0]
7
4 8
5/*------------------------------------------------------------------------------ 9/*------------------------------------------------------------------------------
6 * D E F I N E S 10 * D E F I N E S
@@ -302,7 +306,6 @@ enum aac_queue_types {
302 */ 306 */
303 307
304#define FsaNormal 1 308#define FsaNormal 1
305#define FsaHigh 2
306 309
307/* 310/*
308 * Define the FIB. The FIB is the where all the requested data and 311 * Define the FIB. The FIB is the where all the requested data and
@@ -546,8 +549,6 @@ struct aac_queue {
546 /* This is only valid for adapter to host command queues. */ 549 /* This is only valid for adapter to host command queues. */
547 spinlock_t *lock; /* Spinlock for this queue must take this lock before accessing the lock */ 550 spinlock_t *lock; /* Spinlock for this queue must take this lock before accessing the lock */
548 spinlock_t lockdata; /* Actual lock (used only on one side of the lock) */ 551 spinlock_t lockdata; /* Actual lock (used only on one side of the lock) */
549 unsigned long SavedIrql; /* Previous IRQL when the spin lock is taken */
550 u32 padding; /* Padding - FIXME - can remove I believe */
 551 struct list_head cmdq; /* A queue of FIBs which need to be processed by the FS thread. This is */ 552
552 /* only valid for command queues which receive entries from the adapter. */ 553 /* only valid for command queues which receive entries from the adapter. */
553 struct list_head pendingq; /* A queue of outstanding fib's to the adapter. */ 554 struct list_head pendingq; /* A queue of outstanding fib's to the adapter. */
@@ -776,7 +777,9 @@ struct fsa_dev_info {
776 u64 last; 777 u64 last;
777 u64 size; 778 u64 size;
778 u32 type; 779 u32 type;
780 u32 config_waiting_on;
779 u16 queue_depth; 781 u16 queue_depth;
782 u8 config_needed;
780 u8 valid; 783 u8 valid;
781 u8 ro; 784 u8 ro;
782 u8 locked; 785 u8 locked;
@@ -1012,6 +1015,7 @@ struct aac_dev
1012 /* macro side-effects BEWARE */ 1015 /* macro side-effects BEWARE */
1013# define raw_io_interface \ 1016# define raw_io_interface \
1014 init->InitStructRevision==cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION_4) 1017 init->InitStructRevision==cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION_4)
1018 u8 raw_io_64;
1015 u8 printf_enabled; 1019 u8 printf_enabled;
1016}; 1020};
1017 1021
@@ -1362,8 +1366,10 @@ struct aac_srb_reply
1362#define VM_CtBlockVerify64 18 1366#define VM_CtBlockVerify64 18
1363#define VM_CtHostRead64 19 1367#define VM_CtHostRead64 19
1364#define VM_CtHostWrite64 20 1368#define VM_CtHostWrite64 20
1369#define VM_DrvErrTblLog 21
1370#define VM_NameServe64 22
1365 1371
1366#define MAX_VMCOMMAND_NUM 21 /* used for sizing stats array - leave last */ 1372#define MAX_VMCOMMAND_NUM 23 /* used for sizing stats array - leave last */
1367 1373
1368/* 1374/*
1369 * Descriptive information (eg, vital stats) 1375 * Descriptive information (eg, vital stats)
@@ -1472,6 +1478,7 @@ struct aac_mntent {
1472 manager (eg, filesystem) */ 1478 manager (eg, filesystem) */
1473 __le32 altoid; /* != oid <==> snapshot or 1479 __le32 altoid; /* != oid <==> snapshot or
1474 broken mirror exists */ 1480 broken mirror exists */
1481 __le32 capacityhigh;
1475}; 1482};
1476 1483
 1477#define FSCS_NOTCLEAN 0x0001 /* fsck is necessary before mounting */ 1484
@@ -1707,6 +1714,7 @@ extern struct aac_common aac_config;
1707#define AifCmdJobProgress 2 /* Progress report */ 1714#define AifCmdJobProgress 2 /* Progress report */
1708#define AifJobCtrZero 101 /* Array Zero progress */ 1715#define AifJobCtrZero 101 /* Array Zero progress */
1709#define AifJobStsSuccess 1 /* Job completes */ 1716#define AifJobStsSuccess 1 /* Job completes */
1717#define AifJobStsRunning 102 /* Job running */
1710#define AifCmdAPIReport 3 /* Report from other user of API */ 1718#define AifCmdAPIReport 3 /* Report from other user of API */
1711#define AifCmdDriverNotify 4 /* Notify host driver of event */ 1719#define AifCmdDriverNotify 4 /* Notify host driver of event */
1712#define AifDenMorphComplete 200 /* A morph operation completed */ 1720#define AifDenMorphComplete 200 /* A morph operation completed */
@@ -1777,6 +1785,7 @@ int fib_adapter_complete(struct fib * fibptr, unsigned short size);
1777struct aac_driver_ident* aac_get_driver_ident(int devtype); 1785struct aac_driver_ident* aac_get_driver_ident(int devtype);
1778int aac_get_adapter_info(struct aac_dev* dev); 1786int aac_get_adapter_info(struct aac_dev* dev);
1779int aac_send_shutdown(struct aac_dev *dev); 1787int aac_send_shutdown(struct aac_dev *dev);
1788int probe_container(struct aac_dev *dev, int cid);
1780extern int numacb; 1789extern int numacb;
1781extern int acbsize; 1790extern int acbsize;
1782extern char aac_driver_version[]; 1791extern char aac_driver_version[];
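The nblank() helper added at the top of aacraid.h stringizes whatever its argument expands to and tests the first character, so if (nblank(dprintk(x))) is zero when dprintk() is compiled out and non-zero otherwise; the io_callback() hunk earlier uses exactly that to skip its LBA-decoding debug block. A stand-alone, user-space illustration of the preprocessor behaviour (the dprintk definition here is the disabled case, for demonstration only):

#include <stdio.h>

#define dprintk(x)			/* debug disabled: expands to nothing */
#define _nblank(x) #x
#define nblank(x) _nblank(x)[0]

int main(void)
{
	/* The argument expands to nothing, ""[0] == '\0', branch is dead code. */
	if (nblank(dprintk(x)))
		printf("debug path compiled in\n");
	else
		printf("debug path compiled out\n");
	return 0;
}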
diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c
index 75abd0453289..59a341b2aedc 100644
--- a/drivers/scsi/aacraid/comminit.c
+++ b/drivers/scsi/aacraid/comminit.c
@@ -195,7 +195,7 @@ int aac_send_shutdown(struct aac_dev * dev)
195 fibctx, 195 fibctx,
196 sizeof(struct aac_close), 196 sizeof(struct aac_close),
197 FsaNormal, 197 FsaNormal,
198 1, 1, 198 -2 /* Timeout silently */, 1,
199 NULL, NULL); 199 NULL, NULL);
200 200
201 if (status == 0) 201 if (status == 0)
@@ -313,8 +313,15 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev)
313 dev->max_fib_size = sizeof(struct hw_fib); 313 dev->max_fib_size = sizeof(struct hw_fib);
314 dev->sg_tablesize = host->sg_tablesize = (dev->max_fib_size 314 dev->sg_tablesize = host->sg_tablesize = (dev->max_fib_size
315 - sizeof(struct aac_fibhdr) 315 - sizeof(struct aac_fibhdr)
316 - sizeof(struct aac_write) + sizeof(struct sgmap)) 316 - sizeof(struct aac_write) + sizeof(struct sgentry))
317 / sizeof(struct sgmap); 317 / sizeof(struct sgentry);
318 dev->raw_io_64 = 0;
319 if ((!aac_adapter_sync_cmd(dev, GET_ADAPTER_PROPERTIES,
320 0, 0, 0, 0, 0, 0, status+0, status+1, status+2, NULL, NULL)) &&
321 (status[0] == 0x00000001)) {
322 if (status[1] & AAC_OPT_NEW_COMM_64)
323 dev->raw_io_64 = 1;
324 }
318 if ((!aac_adapter_sync_cmd(dev, GET_COMM_PREFERRED_SETTINGS, 325 if ((!aac_adapter_sync_cmd(dev, GET_COMM_PREFERRED_SETTINGS,
319 0, 0, 0, 0, 0, 0, 326 0, 0, 0, 0, 0, 0,
320 status+0, status+1, status+2, status+3, status+4)) 327 status+0, status+1, status+2, status+3, status+4))
@@ -342,8 +349,8 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev)
342 dev->max_fib_size = 512; 349 dev->max_fib_size = 512;
343 dev->sg_tablesize = host->sg_tablesize 350 dev->sg_tablesize = host->sg_tablesize
344 = (512 - sizeof(struct aac_fibhdr) 351 = (512 - sizeof(struct aac_fibhdr)
345 - sizeof(struct aac_write) + sizeof(struct sgmap)) 352 - sizeof(struct aac_write) + sizeof(struct sgentry))
346 / sizeof(struct sgmap); 353 / sizeof(struct sgentry);
347 host->can_queue = AAC_NUM_IO_FIB; 354 host->can_queue = AAC_NUM_IO_FIB;
348 } else if (acbsize == 2048) { 355 } else if (acbsize == 2048) {
349 host->max_sectors = 512; 356 host->max_sectors = 512;
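The sg_tablesize computation above now divides by sizeof(struct sgentry) rather than sizeof(struct sgmap). A compilable model of that arithmetic, with the aacraid structure sizes replaced by placeholder constants:

#include <stdio.h>

/* Placeholder sizes; the real values come from struct aac_fibhdr,
 * struct aac_write and struct sgentry. */
#define FIB_HDR_SIZE    32
#define WRITE_CMD_SIZE  40
#define SGENTRY_SIZE    8

/* Model of aac_init_adapter(): the write command already embeds one
 * sgentry, so it is added back before dividing the remaining FIB
 * payload by the per-entry size. */
static int sg_tablesize(int max_fib_size)
{
        return (max_fib_size - FIB_HDR_SIZE - WRITE_CMD_SIZE + SGENTRY_SIZE)
                / SGENTRY_SIZE;
}

int main(void)
{
        printf("512-byte fib -> %d sg entries\n", sg_tablesize(512));
        return 0;
}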
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index a1d303f03480..e4d543a474ae 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -39,7 +39,9 @@
39#include <linux/completion.h> 39#include <linux/completion.h>
40#include <linux/blkdev.h> 40#include <linux/blkdev.h>
41#include <scsi/scsi_host.h> 41#include <scsi/scsi_host.h>
42#include <scsi/scsi_device.h>
42#include <asm/semaphore.h> 43#include <asm/semaphore.h>
44#include <asm/delay.h>
43 45
44#include "aacraid.h" 46#include "aacraid.h"
45 47
@@ -269,40 +271,22 @@ static int aac_get_entry (struct aac_dev * dev, u32 qid, struct aac_entry **entr
269 /* Interrupt Moderation, only interrupt for first two entries */ 271 /* Interrupt Moderation, only interrupt for first two entries */
270 if (idx != le32_to_cpu(*(q->headers.consumer))) { 272 if (idx != le32_to_cpu(*(q->headers.consumer))) {
271 if (--idx == 0) { 273 if (--idx == 0) {
272 if (qid == AdapHighCmdQueue) 274 if (qid == AdapNormCmdQueue)
273 idx = ADAP_HIGH_CMD_ENTRIES;
274 else if (qid == AdapNormCmdQueue)
275 idx = ADAP_NORM_CMD_ENTRIES; 275 idx = ADAP_NORM_CMD_ENTRIES;
276 else if (qid == AdapHighRespQueue) 276 else
277 idx = ADAP_HIGH_RESP_ENTRIES;
278 else if (qid == AdapNormRespQueue)
279 idx = ADAP_NORM_RESP_ENTRIES; 277 idx = ADAP_NORM_RESP_ENTRIES;
280 } 278 }
281 if (idx != le32_to_cpu(*(q->headers.consumer))) 279 if (idx != le32_to_cpu(*(q->headers.consumer)))
282 *nonotify = 1; 280 *nonotify = 1;
283 } 281 }
284 282
285 if (qid == AdapHighCmdQueue) { 283 if (qid == AdapNormCmdQueue) {
286 if (*index >= ADAP_HIGH_CMD_ENTRIES)
287 *index = 0;
288 } else if (qid == AdapNormCmdQueue) {
289 if (*index >= ADAP_NORM_CMD_ENTRIES) 284 if (*index >= ADAP_NORM_CMD_ENTRIES)
290 *index = 0; /* Wrap to front of the Producer Queue. */ 285 *index = 0; /* Wrap to front of the Producer Queue. */
291 } 286 } else {
292 else if (qid == AdapHighRespQueue)
293 {
294 if (*index >= ADAP_HIGH_RESP_ENTRIES)
295 *index = 0;
296 }
297 else if (qid == AdapNormRespQueue)
298 {
299 if (*index >= ADAP_NORM_RESP_ENTRIES) 287 if (*index >= ADAP_NORM_RESP_ENTRIES)
300 *index = 0; /* Wrap to front of the Producer Queue. */ 288 *index = 0; /* Wrap to front of the Producer Queue. */
301 } 289 }
302 else {
303 printk("aacraid: invalid qid\n");
304 BUG();
305 }
306 290
307 if ((*index + 1) == le32_to_cpu(*(q->headers.consumer))) { /* Queue is full */ 291 if ((*index + 1) == le32_to_cpu(*(q->headers.consumer))) { /* Queue is full */
308 printk(KERN_WARNING "Queue %d full, %u outstanding.\n", 292 printk(KERN_WARNING "Queue %d full, %u outstanding.\n",
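With the high-priority queues removed, aac_get_entry() only has to wrap the producer index against the two normal-priority entry counts. A userspace model of the wrap, with placeholder entry counts:

/* Placeholder counts; the driver uses its own ADAP_NORM_*_ENTRIES values. */
#define ADAP_NORM_CMD_ENTRIES   8
#define ADAP_NORM_RESP_ENTRIES  8

enum queue_id { ADAP_NORM_CMD_QUEUE, ADAP_NORM_RESP_QUEUE };

/* Wrap the producer index to the front of the queue when the end is reached. */
static unsigned wrap_index(enum queue_id qid, unsigned index)
{
        unsigned limit = (qid == ADAP_NORM_CMD_QUEUE)
                ? ADAP_NORM_CMD_ENTRIES : ADAP_NORM_RESP_ENTRIES;

        return (index >= limit) ? 0 : index;
}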
@@ -334,12 +318,8 @@ static int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_f
334{ 318{
335 struct aac_entry * entry = NULL; 319 struct aac_entry * entry = NULL;
336 int map = 0; 320 int map = 0;
337 struct aac_queue * q = &dev->queues->queue[qid];
338
339 spin_lock_irqsave(q->lock, q->SavedIrql);
340 321
341 if (qid == AdapHighCmdQueue || qid == AdapNormCmdQueue) 322 if (qid == AdapNormCmdQueue) {
342 {
343 /* if no entries wait for some if caller wants to */ 323 /* if no entries wait for some if caller wants to */
344 while (!aac_get_entry(dev, qid, &entry, index, nonotify)) 324 while (!aac_get_entry(dev, qid, &entry, index, nonotify))
345 { 325 {
@@ -350,9 +330,7 @@ static int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_f
350 */ 330 */
351 entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size)); 331 entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size));
352 map = 1; 332 map = 1;
353 } 333 } else {
354 else if (qid == AdapHighRespQueue || qid == AdapNormRespQueue)
355 {
356 while(!aac_get_entry(dev, qid, &entry, index, nonotify)) 334 while(!aac_get_entry(dev, qid, &entry, index, nonotify))
357 { 335 {
358 /* if no entries wait for some if caller wants to */ 336 /* if no entries wait for some if caller wants to */
@@ -375,42 +353,6 @@ static int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_f
375 return 0; 353 return 0;
376} 354}
377 355
378
379/**
380 * aac_insert_entry - insert a queue entry
381 * @dev: Adapter
382 * @index: Index of entry to insert
383 * @qid: Queue number
384 * @nonotify: Suppress adapter notification
385 *
386 * Gets the next free QE off the requested priorty adapter command
387 * queue and associates the Fib with the QE. The QE represented by
388 * index is ready to insert on the queue when this routine returns
389 * success.
390 */
391
392static int aac_insert_entry(struct aac_dev * dev, u32 index, u32 qid, unsigned long nonotify)
393{
394 struct aac_queue * q = &dev->queues->queue[qid];
395
396 if(q == NULL)
397 BUG();
398 *(q->headers.producer) = cpu_to_le32(index + 1);
399 spin_unlock_irqrestore(q->lock, q->SavedIrql);
400
401 if (qid == AdapHighCmdQueue ||
402 qid == AdapNormCmdQueue ||
403 qid == AdapHighRespQueue ||
404 qid == AdapNormRespQueue)
405 {
406 if (!nonotify)
407 aac_adapter_notify(dev, qid);
408 }
409 else
410 printk("Suprise insert!\n");
411 return 0;
412}
413
414/* 356/*
415 * Define the highest level of host to adapter communication routines. 357 * Define the highest level of host to adapter communication routines.
416 * These routines will support host to adapter FS commuication. These 358 * These routines will support host to adapter FS commuication. These
@@ -439,12 +381,13 @@ static int aac_insert_entry(struct aac_dev * dev, u32 index, u32 qid, unsigned l
439int fib_send(u16 command, struct fib * fibptr, unsigned long size, int priority, int wait, int reply, fib_callback callback, void * callback_data) 381int fib_send(u16 command, struct fib * fibptr, unsigned long size, int priority, int wait, int reply, fib_callback callback, void * callback_data)
440{ 382{
441 u32 index; 383 u32 index;
442 u32 qid;
443 struct aac_dev * dev = fibptr->dev; 384 struct aac_dev * dev = fibptr->dev;
444 unsigned long nointr = 0; 385 unsigned long nointr = 0;
445 struct hw_fib * hw_fib = fibptr->hw_fib; 386 struct hw_fib * hw_fib = fibptr->hw_fib;
446 struct aac_queue * q; 387 struct aac_queue * q;
447 unsigned long flags = 0; 388 unsigned long flags = 0;
389 unsigned long qflags;
390
448 if (!(hw_fib->header.XferState & cpu_to_le32(HostOwned))) 391 if (!(hw_fib->header.XferState & cpu_to_le32(HostOwned)))
449 return -EBUSY; 392 return -EBUSY;
450 /* 393 /*
@@ -497,26 +440,8 @@ int fib_send(u16 command, struct fib * fibptr, unsigned long size, int priority
497 * Get a queue entry connect the FIB to it and send an notify 440 * Get a queue entry connect the FIB to it and send an notify
498 * the adapter a command is ready. 441 * the adapter a command is ready.
499 */ 442 */
500 if (priority == FsaHigh) { 443 hw_fib->header.XferState |= cpu_to_le32(NormalPriority);
501 hw_fib->header.XferState |= cpu_to_le32(HighPriority);
502 qid = AdapHighCmdQueue;
503 } else {
504 hw_fib->header.XferState |= cpu_to_le32(NormalPriority);
505 qid = AdapNormCmdQueue;
506 }
507 q = &dev->queues->queue[qid];
508 444
509 if(wait)
510 spin_lock_irqsave(&fibptr->event_lock, flags);
511 if(aac_queue_get( dev, &index, qid, hw_fib, 1, fibptr, &nointr)<0)
512 return -EWOULDBLOCK;
513 dprintk((KERN_DEBUG "fib_send: inserting a queue entry at index %d.\n",index));
514 dprintk((KERN_DEBUG "Fib contents:.\n"));
515 dprintk((KERN_DEBUG " Command = %d.\n", hw_fib->header.Command));
516 dprintk((KERN_DEBUG " XferState = %x.\n", hw_fib->header.XferState));
517 dprintk((KERN_DEBUG " hw_fib va being sent=%p\n",fibptr->hw_fib));
518 dprintk((KERN_DEBUG " hw_fib pa being sent=%lx\n",(ulong)fibptr->hw_fib_pa));
519 dprintk((KERN_DEBUG " fib being sent=%p\n",fibptr));
520 /* 445 /*
521 * Fill in the Callback and CallbackContext if we are not 446 * Fill in the Callback and CallbackContext if we are not
522 * going to wait. 447 * going to wait.
@@ -525,22 +450,67 @@ int fib_send(u16 command, struct fib * fibptr, unsigned long size, int priority
525 fibptr->callback = callback; 450 fibptr->callback = callback;
526 fibptr->callback_data = callback_data; 451 fibptr->callback_data = callback_data;
527 } 452 }
528 FIB_COUNTER_INCREMENT(aac_config.FibsSent);
529 list_add_tail(&fibptr->queue, &q->pendingq);
530 q->numpending++;
531 453
532 fibptr->done = 0; 454 fibptr->done = 0;
533 fibptr->flags = 0; 455 fibptr->flags = 0;
534 456
535 if(aac_insert_entry(dev, index, qid, (nointr & aac_config.irq_mod)) < 0) 457 FIB_COUNTER_INCREMENT(aac_config.FibsSent);
536 return -EWOULDBLOCK; 458
459 dprintk((KERN_DEBUG "fib_send: inserting a queue entry at index %d.\n",index));
460 dprintk((KERN_DEBUG "Fib contents:.\n"));
461 dprintk((KERN_DEBUG " Command = %d.\n", hw_fib->header.Command));
462 dprintk((KERN_DEBUG " XferState = %x.\n", hw_fib->header.XferState));
463 dprintk((KERN_DEBUG " hw_fib va being sent=%p\n",fibptr->hw_fib));
464 dprintk((KERN_DEBUG " hw_fib pa being sent=%lx\n",(ulong)fibptr->hw_fib_pa));
465 dprintk((KERN_DEBUG " fib being sent=%p\n",fibptr));
466
467 q = &dev->queues->queue[AdapNormCmdQueue];
468
469 if(wait)
470 spin_lock_irqsave(&fibptr->event_lock, flags);
471 spin_lock_irqsave(q->lock, qflags);
472 aac_queue_get( dev, &index, AdapNormCmdQueue, hw_fib, 1, fibptr, &nointr);
473
474 list_add_tail(&fibptr->queue, &q->pendingq);
475 q->numpending++;
476 *(q->headers.producer) = cpu_to_le32(index + 1);
477 spin_unlock_irqrestore(q->lock, qflags);
478 if (!(nointr & aac_config.irq_mod))
479 aac_adapter_notify(dev, AdapNormCmdQueue);
537 /* 480 /*
538 * If the caller wanted us to wait for response wait now. 481 * If the caller wanted us to wait for response wait now.
539 */ 482 */
540 483
541 if (wait) { 484 if (wait) {
542 spin_unlock_irqrestore(&fibptr->event_lock, flags); 485 spin_unlock_irqrestore(&fibptr->event_lock, flags);
543 down(&fibptr->event_wait); 486 /* Only set for first known interruptible command */
487 if (wait < 0) {
488 /*
489 * *VERY* Dangerous to time out a command, the
490 * assumption is made that we have no hope of
491 * functioning because an interrupt routing or other
492 * hardware failure has occurred.
493 */
494 unsigned long count = 36000000L; /* 3 minutes */
495 unsigned long qflags;
496 while (down_trylock(&fibptr->event_wait)) {
497 if (--count == 0) {
498 spin_lock_irqsave(q->lock, qflags);
499 q->numpending--;
500 list_del(&fibptr->queue);
501 spin_unlock_irqrestore(q->lock, qflags);
502 if (wait == -1) {
503 printk(KERN_ERR "aacraid: fib_send: first asynchronous command timed out.\n"
504 "Usually a result of a PCI interrupt routing problem;\n"
505 "update mother board BIOS or consider utilizing one of\n"
506 "the SAFE mode kernel options (acpi, apic etc)\n");
507 }
508 return -ETIMEDOUT;
509 }
510 udelay(5);
511 }
512 } else
513 down(&fibptr->event_wait);
544 if(fibptr->done == 0) 514 if(fibptr->done == 0)
545 BUG(); 515 BUG();
546 516
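The wait < 0 path added above bounds the wait for the first interrupt-driven command: it polls the event semaphore with down_trylock() and a 5 microsecond delay, giving up after roughly three minutes. A standalone C model of that bounded wait, using a flag in place of the driver's semaphore:

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>     /* usleep() */

/* event_done stands in for fibptr->event_wait being signalled. */
static volatile bool event_done;

static int wait_for_event_with_timeout(void)
{
        unsigned long count = 36000000UL;   /* ~3 minutes at 5us per loop */

        while (!event_done) {
                if (--count == 0) {
                        fprintf(stderr, "command timed out\n");
                        return -1;          /* -ETIMEDOUT in the driver */
                }
                usleep(5);
        }
        return 0;
}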
@@ -622,15 +592,9 @@ void aac_consumer_free(struct aac_dev * dev, struct aac_queue *q, u32 qid)
622 case HostNormCmdQueue: 592 case HostNormCmdQueue:
623 notify = HostNormCmdNotFull; 593 notify = HostNormCmdNotFull;
624 break; 594 break;
625 case HostHighCmdQueue:
626 notify = HostHighCmdNotFull;
627 break;
628 case HostNormRespQueue: 595 case HostNormRespQueue:
629 notify = HostNormRespNotFull; 596 notify = HostNormRespNotFull;
630 break; 597 break;
631 case HostHighRespQueue:
632 notify = HostHighRespNotFull;
633 break;
634 default: 598 default:
635 BUG(); 599 BUG();
636 return; 600 return;
@@ -652,9 +616,13 @@ int fib_adapter_complete(struct fib * fibptr, unsigned short size)
652{ 616{
653 struct hw_fib * hw_fib = fibptr->hw_fib; 617 struct hw_fib * hw_fib = fibptr->hw_fib;
654 struct aac_dev * dev = fibptr->dev; 618 struct aac_dev * dev = fibptr->dev;
619 struct aac_queue * q;
655 unsigned long nointr = 0; 620 unsigned long nointr = 0;
656 if (hw_fib->header.XferState == 0) 621 unsigned long qflags;
622
623 if (hw_fib->header.XferState == 0) {
657 return 0; 624 return 0;
625 }
658 /* 626 /*
659 * If we plan to do anything check the structure type first. 627 * If we plan to do anything check the structure type first.
660 */ 628 */
@@ -669,37 +637,21 @@ int fib_adapter_complete(struct fib * fibptr, unsigned short size)
669 * send the completed cdb to the adapter. 637 * send the completed cdb to the adapter.
670 */ 638 */
671 if (hw_fib->header.XferState & cpu_to_le32(SentFromAdapter)) { 639 if (hw_fib->header.XferState & cpu_to_le32(SentFromAdapter)) {
640 u32 index;
672 hw_fib->header.XferState |= cpu_to_le32(HostProcessed); 641 hw_fib->header.XferState |= cpu_to_le32(HostProcessed);
673 if (hw_fib->header.XferState & cpu_to_le32(HighPriority)) { 642 if (size) {
674 u32 index; 643 size += sizeof(struct aac_fibhdr);
675 if (size) 644 if (size > le16_to_cpu(hw_fib->header.SenderSize))
676 { 645 return -EMSGSIZE;
677 size += sizeof(struct aac_fibhdr); 646 hw_fib->header.Size = cpu_to_le16(size);
678 if (size > le16_to_cpu(hw_fib->header.SenderSize))
679 return -EMSGSIZE;
680 hw_fib->header.Size = cpu_to_le16(size);
681 }
682 if(aac_queue_get(dev, &index, AdapHighRespQueue, hw_fib, 1, NULL, &nointr) < 0) {
683 return -EWOULDBLOCK;
684 }
685 if (aac_insert_entry(dev, index, AdapHighRespQueue, (nointr & (int)aac_config.irq_mod)) != 0) {
686 }
687 } else if (hw_fib->header.XferState &
688 cpu_to_le32(NormalPriority)) {
689 u32 index;
690
691 if (size) {
692 size += sizeof(struct aac_fibhdr);
693 if (size > le16_to_cpu(hw_fib->header.SenderSize))
694 return -EMSGSIZE;
695 hw_fib->header.Size = cpu_to_le16(size);
696 }
697 if (aac_queue_get(dev, &index, AdapNormRespQueue, hw_fib, 1, NULL, &nointr) < 0)
698 return -EWOULDBLOCK;
699 if (aac_insert_entry(dev, index, AdapNormRespQueue, (nointr & (int)aac_config.irq_mod)) != 0)
700 {
701 }
702 } 647 }
648 q = &dev->queues->queue[AdapNormRespQueue];
649 spin_lock_irqsave(q->lock, qflags);
650 aac_queue_get(dev, &index, AdapNormRespQueue, hw_fib, 1, NULL, &nointr);
651 *(q->headers.producer) = cpu_to_le32(index + 1);
652 spin_unlock_irqrestore(q->lock, qflags);
653 if (!(nointr & (int)aac_config.irq_mod))
654 aac_adapter_notify(dev, AdapNormRespQueue);
703 } 655 }
704 else 656 else
705 { 657 {
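fib_adapter_complete() now always completes on the normal response queue: the producer index is advanced under the queue lock, and the adapter is notified only when interrupt moderation permits. A small pthread-based model of that sequence; notify_adapter() stands in for aac_adapter_notify():

#include <pthread.h>
#include <stdint.h>

struct resp_queue {
        pthread_mutex_t lock;       /* must be initialised by the caller */
        uint32_t producer;
};

static void notify_adapter(void) { /* ring the adapter doorbell here */ }

static void complete_response(struct resp_queue *q, uint32_t index, int nonotify)
{
        pthread_mutex_lock(&q->lock);
        q->producer = index + 1;    /* publish the new producer index */
        pthread_mutex_unlock(&q->lock);

        if (!nonotify)
                notify_adapter();
}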
@@ -791,6 +743,268 @@ void aac_printf(struct aac_dev *dev, u32 val)
791 memset(cp, 0, 256); 743 memset(cp, 0, 256);
792} 744}
793 745
746
747/**
748 * aac_handle_aif - Handle a message from the firmware
749 * @dev: Which adapter this fib is from
750 * @fibptr: Pointer to fibptr from adapter
751 *
752 * This routine handles a driver notify fib from the adapter and
753 * dispatches it to the appropriate routine for handling.
754 */
755
756static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
757{
758 struct hw_fib * hw_fib = fibptr->hw_fib;
759 struct aac_aifcmd * aifcmd = (struct aac_aifcmd *)hw_fib->data;
760 int busy;
761 u32 container;
762 struct scsi_device *device;
763 enum {
764 NOTHING,
765 DELETE,
766 ADD,
767 CHANGE
768 } device_config_needed;
769
770 /* Sniff for container changes */
771
772 if (!dev)
773 return;
774 container = (u32)-1;
775
776 /*
777 * We have set this up to try and minimize the number of
778 * re-configures that take place. As a result of this when
779 * certain AIF's come in we will set a flag waiting for another
780 * type of AIF before setting the re-config flag.
781 */
782 switch (le32_to_cpu(aifcmd->command)) {
783 case AifCmdDriverNotify:
784 switch (le32_to_cpu(((u32 *)aifcmd->data)[0])) {
785 /*
786 * Morph or Expand complete
787 */
788 case AifDenMorphComplete:
789 case AifDenVolumeExtendComplete:
790 container = le32_to_cpu(((u32 *)aifcmd->data)[1]);
791 if (container >= dev->maximum_num_containers)
792 break;
793
794 /*
795 * Find the Scsi_Device associated with the SCSI
796 * address. Make sure we have the right array, and if
797 * so set the flag to initiate a new re-config once we
798 * see an AifEnConfigChange AIF come through.
799 */
800
801 if ((dev != NULL) && (dev->scsi_host_ptr != NULL)) {
802 device = scsi_device_lookup(dev->scsi_host_ptr,
803 CONTAINER_TO_CHANNEL(container),
804 CONTAINER_TO_ID(container),
805 CONTAINER_TO_LUN(container));
806 if (device) {
807 dev->fsa_dev[container].config_needed = CHANGE;
808 dev->fsa_dev[container].config_waiting_on = AifEnConfigChange;
809 scsi_device_put(device);
810 }
811 }
812 }
813
814 /*
815 * If we are waiting on something and this happens to be
816 * that thing then set the re-configure flag.
817 */
818 if (container != (u32)-1) {
819 if (container >= dev->maximum_num_containers)
820 break;
821 if (dev->fsa_dev[container].config_waiting_on ==
822 le32_to_cpu(*(u32 *)aifcmd->data))
823 dev->fsa_dev[container].config_waiting_on = 0;
824 } else for (container = 0;
825 container < dev->maximum_num_containers; ++container) {
826 if (dev->fsa_dev[container].config_waiting_on ==
827 le32_to_cpu(*(u32 *)aifcmd->data))
828 dev->fsa_dev[container].config_waiting_on = 0;
829 }
830 break;
831
832 case AifCmdEventNotify:
833 switch (le32_to_cpu(((u32 *)aifcmd->data)[0])) {
834 /*
835 * Add an Array.
836 */
837 case AifEnAddContainer:
838 container = le32_to_cpu(((u32 *)aifcmd->data)[1]);
839 if (container >= dev->maximum_num_containers)
840 break;
841 dev->fsa_dev[container].config_needed = ADD;
842 dev->fsa_dev[container].config_waiting_on =
843 AifEnConfigChange;
844 break;
845
846 /*
847 * Delete an Array.
848 */
849 case AifEnDeleteContainer:
850 container = le32_to_cpu(((u32 *)aifcmd->data)[1]);
851 if (container >= dev->maximum_num_containers)
852 break;
853 dev->fsa_dev[container].config_needed = DELETE;
854 dev->fsa_dev[container].config_waiting_on =
855 AifEnConfigChange;
856 break;
857
858 /*
859 * Container change detected. If we currently are not
860 * waiting on something else, setup to wait on a Config Change.
861 */
862 case AifEnContainerChange:
863 container = le32_to_cpu(((u32 *)aifcmd->data)[1]);
864 if (container >= dev->maximum_num_containers)
865 break;
866 if (dev->fsa_dev[container].config_waiting_on)
867 break;
868 dev->fsa_dev[container].config_needed = CHANGE;
869 dev->fsa_dev[container].config_waiting_on =
870 AifEnConfigChange;
871 break;
872
873 case AifEnConfigChange:
874 break;
875
876 }
877
878 /*
879 * If we are waiting on something and this happens to be
880 * that thing then set the re-configure flag.
881 */
882 if (container != (u32)-1) {
883 if (container >= dev->maximum_num_containers)
884 break;
885 if (dev->fsa_dev[container].config_waiting_on ==
886 le32_to_cpu(*(u32 *)aifcmd->data))
887 dev->fsa_dev[container].config_waiting_on = 0;
888 } else for (container = 0;
889 container < dev->maximum_num_containers; ++container) {
890 if (dev->fsa_dev[container].config_waiting_on ==
891 le32_to_cpu(*(u32 *)aifcmd->data))
892 dev->fsa_dev[container].config_waiting_on = 0;
893 }
894 break;
895
896 case AifCmdJobProgress:
897 /*
898 * These are job progress AIF's. When a Clear is being
899 * done on a container it is initially created then hidden from
900 * the OS. When the clear completes we don't get a config
901 * change so we monitor the job status complete on a clear then
902 * wait for a container change.
903 */
904
905 if ((((u32 *)aifcmd->data)[1] == cpu_to_le32(AifJobCtrZero))
906 && ((((u32 *)aifcmd->data)[6] == ((u32 *)aifcmd->data)[5])
907 || (((u32 *)aifcmd->data)[4] == cpu_to_le32(AifJobStsSuccess)))) {
908 for (container = 0;
909 container < dev->maximum_num_containers;
910 ++container) {
911 /*
912 * Stomp on all config sequencing for all
913 * containers?
914 */
915 dev->fsa_dev[container].config_waiting_on =
916 AifEnContainerChange;
917 dev->fsa_dev[container].config_needed = ADD;
918 }
919 }
920 if ((((u32 *)aifcmd->data)[1] == cpu_to_le32(AifJobCtrZero))
921 && (((u32 *)aifcmd->data)[6] == 0)
922 && (((u32 *)aifcmd->data)[4] == cpu_to_le32(AifJobStsRunning))) {
923 for (container = 0;
924 container < dev->maximum_num_containers;
925 ++container) {
926 /*
927 * Stomp on all config sequencing for all
928 * containers?
929 */
930 dev->fsa_dev[container].config_waiting_on =
931 AifEnContainerChange;
932 dev->fsa_dev[container].config_needed = DELETE;
933 }
934 }
935 break;
936 }
937
938 device_config_needed = NOTHING;
939 for (container = 0; container < dev->maximum_num_containers;
940 ++container) {
941 if ((dev->fsa_dev[container].config_waiting_on == 0)
942 && (dev->fsa_dev[container].config_needed != NOTHING)) {
943 device_config_needed =
944 dev->fsa_dev[container].config_needed;
945 dev->fsa_dev[container].config_needed = NOTHING;
946 break;
947 }
948 }
949 if (device_config_needed == NOTHING)
950 return;
951
952 /*
953 * If we decided that a re-configuration needs to be done,
954 * schedule it here on the way out the door, please close the door
955 * behind you.
956 */
957
958 busy = 0;
959
960
961 /*
962 * Find the Scsi_Device associated with the SCSI address,
963 * and mark it as changed, invalidating the cache. This deals
964 * with changes to existing device IDs.
965 */
966
967 if (!dev || !dev->scsi_host_ptr)
968 return;
969 /*
970 * force reload of disk info via probe_container
971 */
972 if ((device_config_needed == CHANGE)
973 && (dev->fsa_dev[container].valid == 1))
974 dev->fsa_dev[container].valid = 2;
975 if ((device_config_needed == CHANGE) ||
976 (device_config_needed == ADD))
977 probe_container(dev, container);
978 device = scsi_device_lookup(dev->scsi_host_ptr,
979 CONTAINER_TO_CHANNEL(container),
980 CONTAINER_TO_ID(container),
981 CONTAINER_TO_LUN(container));
982 if (device) {
983 switch (device_config_needed) {
984 case DELETE:
985 scsi_remove_device(device);
986 break;
987 case CHANGE:
988 if (!dev->fsa_dev[container].valid) {
989 scsi_remove_device(device);
990 break;
991 }
992 scsi_rescan_device(&device->sdev_gendev);
993
994 default:
995 break;
996 }
997 scsi_device_put(device);
998 }
999 if (device_config_needed == ADD) {
1000 scsi_add_device(dev->scsi_host_ptr,
1001 CONTAINER_TO_CHANNEL(container),
1002 CONTAINER_TO_ID(container),
1003 CONTAINER_TO_LUN(container));
1004 }
1005
1006}
1007
794/** 1008/**
795 * aac_command_thread - command processing thread 1009 * aac_command_thread - command processing thread
796 * @dev: Adapter to monitor 1010 * @dev: Adapter to monitor
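aac_handle_aif() tracks per-container state in two fields: config_needed records what kind of reconfiguration is pending, and config_waiting_on holds the follow-up AIF event that must arrive before it is acted on. A compact sketch of that two-step state machine, with a placeholder event code:

#include <stdint.h>

enum config_needed { NOTHING, DELETE, ADD, CHANGE };

struct fsa_dev_state {
        uint32_t config_waiting_on;   /* AIF event still expected, 0 if none */
        enum config_needed config_needed;
};

#define AIF_EN_CONFIG_CHANGE 1        /* placeholder event code */

/* A container-change AIF arms the state machine... */
static void on_container_change(struct fsa_dev_state *dev)
{
        if (dev->config_waiting_on)
                return;               /* already waiting on something else */
        dev->config_needed = CHANGE;
        dev->config_waiting_on = AIF_EN_CONFIG_CHANGE;
}

/* ...and the matching config-change AIF releases it; return non-zero when a
 * reconfiguration should actually be scheduled. */
static int on_event(struct fsa_dev_state *dev, uint32_t event)
{
        if (dev->config_waiting_on == event)
                dev->config_waiting_on = 0;
        return dev->config_waiting_on == 0 && dev->config_needed != NOTHING;
}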
@@ -805,7 +1019,6 @@ int aac_command_thread(struct aac_dev * dev)
805{ 1019{
806 struct hw_fib *hw_fib, *hw_newfib; 1020 struct hw_fib *hw_fib, *hw_newfib;
807 struct fib *fib, *newfib; 1021 struct fib *fib, *newfib;
808 struct aac_queue_block *queues = dev->queues;
809 struct aac_fib_context *fibctx; 1022 struct aac_fib_context *fibctx;
810 unsigned long flags; 1023 unsigned long flags;
811 DECLARE_WAITQUEUE(wait, current); 1024 DECLARE_WAITQUEUE(wait, current);
@@ -825,21 +1038,22 @@ int aac_command_thread(struct aac_dev * dev)
825 * Let the DPC know it has a place to send the AIF's to. 1038 * Let the DPC know it has a place to send the AIF's to.
826 */ 1039 */
827 dev->aif_thread = 1; 1040 dev->aif_thread = 1;
828 add_wait_queue(&queues->queue[HostNormCmdQueue].cmdready, &wait); 1041 add_wait_queue(&dev->queues->queue[HostNormCmdQueue].cmdready, &wait);
829 set_current_state(TASK_INTERRUPTIBLE); 1042 set_current_state(TASK_INTERRUPTIBLE);
1043 dprintk ((KERN_INFO "aac_command_thread start\n"));
830 while(1) 1044 while(1)
831 { 1045 {
832 spin_lock_irqsave(queues->queue[HostNormCmdQueue].lock, flags); 1046 spin_lock_irqsave(dev->queues->queue[HostNormCmdQueue].lock, flags);
833 while(!list_empty(&(queues->queue[HostNormCmdQueue].cmdq))) { 1047 while(!list_empty(&(dev->queues->queue[HostNormCmdQueue].cmdq))) {
834 struct list_head *entry; 1048 struct list_head *entry;
835 struct aac_aifcmd * aifcmd; 1049 struct aac_aifcmd * aifcmd;
836 1050
837 set_current_state(TASK_RUNNING); 1051 set_current_state(TASK_RUNNING);
838 1052
839 entry = queues->queue[HostNormCmdQueue].cmdq.next; 1053 entry = dev->queues->queue[HostNormCmdQueue].cmdq.next;
840 list_del(entry); 1054 list_del(entry);
841 1055
842 spin_unlock_irqrestore(queues->queue[HostNormCmdQueue].lock, flags); 1056 spin_unlock_irqrestore(dev->queues->queue[HostNormCmdQueue].lock, flags);
843 fib = list_entry(entry, struct fib, fiblink); 1057 fib = list_entry(entry, struct fib, fiblink);
844 /* 1058 /*
845 * We will process the FIB here or pass it to a 1059 * We will process the FIB here or pass it to a
@@ -860,6 +1074,7 @@ int aac_command_thread(struct aac_dev * dev)
860 aifcmd = (struct aac_aifcmd *) hw_fib->data; 1074 aifcmd = (struct aac_aifcmd *) hw_fib->data;
861 if (aifcmd->command == cpu_to_le32(AifCmdDriverNotify)) { 1075 if (aifcmd->command == cpu_to_le32(AifCmdDriverNotify)) {
862 /* Handle Driver Notify Events */ 1076 /* Handle Driver Notify Events */
1077 aac_handle_aif(dev, fib);
863 *(__le32 *)hw_fib->data = cpu_to_le32(ST_OK); 1078 *(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
864 fib_adapter_complete(fib, (u16)sizeof(u32)); 1079 fib_adapter_complete(fib, (u16)sizeof(u32));
865 } else { 1080 } else {
@@ -869,9 +1084,62 @@ int aac_command_thread(struct aac_dev * dev)
869 1084
870 u32 time_now, time_last; 1085 u32 time_now, time_last;
871 unsigned long flagv; 1086 unsigned long flagv;
872 1087 unsigned num;
1088 struct hw_fib ** hw_fib_pool, ** hw_fib_p;
1089 struct fib ** fib_pool, ** fib_p;
1090
1091 /* Sniff events */
1092 if ((aifcmd->command ==
1093 cpu_to_le32(AifCmdEventNotify)) ||
1094 (aifcmd->command ==
1095 cpu_to_le32(AifCmdJobProgress))) {
1096 aac_handle_aif(dev, fib);
1097 }
1098
873 time_now = jiffies/HZ; 1099 time_now = jiffies/HZ;
874 1100
1101 /*
1102 * Warning: no sleep allowed while
1103 * holding spinlock. We take the estimate
1104 * and pre-allocate a set of fibs outside the
1105 * lock.
1106 */
1107 num = le32_to_cpu(dev->init->AdapterFibsSize)
1108 / sizeof(struct hw_fib); /* some extra */
1109 spin_lock_irqsave(&dev->fib_lock, flagv);
1110 entry = dev->fib_list.next;
1111 while (entry != &dev->fib_list) {
1112 entry = entry->next;
1113 ++num;
1114 }
1115 spin_unlock_irqrestore(&dev->fib_lock, flagv);
1116 hw_fib_pool = NULL;
1117 fib_pool = NULL;
1118 if (num
1119 && ((hw_fib_pool = kmalloc(sizeof(struct hw_fib *) * num, GFP_KERNEL)))
1120 && ((fib_pool = kmalloc(sizeof(struct fib *) * num, GFP_KERNEL)))) {
1121 hw_fib_p = hw_fib_pool;
1122 fib_p = fib_pool;
1123 while (hw_fib_p < &hw_fib_pool[num]) {
1124 if (!(*(hw_fib_p++) = kmalloc(sizeof(struct hw_fib), GFP_KERNEL))) {
1125 --hw_fib_p;
1126 break;
1127 }
1128 if (!(*(fib_p++) = kmalloc(sizeof(struct fib), GFP_KERNEL))) {
1129 kfree(*(--hw_fib_p));
1130 break;
1131 }
1132 }
1133 if ((num = hw_fib_p - hw_fib_pool) == 0) {
1134 kfree(fib_pool);
1135 fib_pool = NULL;
1136 kfree(hw_fib_pool);
1137 hw_fib_pool = NULL;
1138 }
1139 } else if (hw_fib_pool) {
1140 kfree(hw_fib_pool);
1141 hw_fib_pool = NULL;
1142 }
875 spin_lock_irqsave(&dev->fib_lock, flagv); 1143 spin_lock_irqsave(&dev->fib_lock, flagv);
876 entry = dev->fib_list.next; 1144 entry = dev->fib_list.next;
877 /* 1145 /*
@@ -880,6 +1148,8 @@ int aac_command_thread(struct aac_dev * dev)
880 * fib, and then set the event to wake up the 1148 * fib, and then set the event to wake up the
881 * thread that is waiting for it. 1149 * thread that is waiting for it.
882 */ 1150 */
1151 hw_fib_p = hw_fib_pool;
1152 fib_p = fib_pool;
883 while (entry != &dev->fib_list) { 1153 while (entry != &dev->fib_list) {
884 /* 1154 /*
885 * Extract the fibctx 1155 * Extract the fibctx
@@ -912,9 +1182,11 @@ int aac_command_thread(struct aac_dev * dev)
912 * Warning: no sleep allowed while 1182 * Warning: no sleep allowed while
913 * holding spinlock 1183 * holding spinlock
914 */ 1184 */
915 hw_newfib = kmalloc(sizeof(struct hw_fib), GFP_ATOMIC); 1185 if (hw_fib_p < &hw_fib_pool[num]) {
916 newfib = kmalloc(sizeof(struct fib), GFP_ATOMIC); 1186 hw_newfib = *hw_fib_p;
917 if (newfib && hw_newfib) { 1187 *(hw_fib_p++) = NULL;
1188 newfib = *fib_p;
1189 *(fib_p++) = NULL;
918 /* 1190 /*
919 * Make the copy of the FIB 1191 * Make the copy of the FIB
920 */ 1192 */
@@ -929,15 +1201,11 @@ int aac_command_thread(struct aac_dev * dev)
929 fibctx->count++; 1201 fibctx->count++;
930 /* 1202 /*
931 * Set the event to wake up the 1203 * Set the event to wake up the
932 * thread that will waiting. 1204 * thread that is waiting.
933 */ 1205 */
934 up(&fibctx->wait_sem); 1206 up(&fibctx->wait_sem);
935 } else { 1207 } else {
936 printk(KERN_WARNING "aifd: didn't allocate NewFib.\n"); 1208 printk(KERN_WARNING "aifd: didn't allocate NewFib.\n");
937 if(newfib)
938 kfree(newfib);
939 if(hw_newfib)
940 kfree(hw_newfib);
941 } 1209 }
942 entry = entry->next; 1210 entry = entry->next;
943 } 1211 }
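The hunk above replaces GFP_ATOMIC allocations under the queue spinlock with a pool of FIB copies allocated up front, where sleeping is allowed. A userspace model of the pre-allocation step; the paired struct fib allocations and the exact count estimate are omitted for brevity:

#include <stdlib.h>

struct hw_fib { char payload[512]; };   /* placeholder size */

/* Allocate up to num copies before taking any lock; a partial pool is kept,
 * just as the driver keeps whatever it managed to allocate. */
static struct hw_fib **prealloc_fib_pool(unsigned num)
{
        struct hw_fib **pool = calloc(num, sizeof(*pool));
        unsigned i;

        if (!pool)
                return NULL;
        for (i = 0; i < num; i++) {
                pool[i] = malloc(sizeof(struct hw_fib));
                if (!pool[i])
                        break;
        }
        return pool;
}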
@@ -947,21 +1215,38 @@ int aac_command_thread(struct aac_dev * dev)
947 *(__le32 *)hw_fib->data = cpu_to_le32(ST_OK); 1215 *(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
948 fib_adapter_complete(fib, sizeof(u32)); 1216 fib_adapter_complete(fib, sizeof(u32));
949 spin_unlock_irqrestore(&dev->fib_lock, flagv); 1217 spin_unlock_irqrestore(&dev->fib_lock, flagv);
1218 /* Free up the remaining resources */
1219 hw_fib_p = hw_fib_pool;
1220 fib_p = fib_pool;
1221 while (hw_fib_p < &hw_fib_pool[num]) {
1222 if (*hw_fib_p)
1223 kfree(*hw_fib_p);
1224 if (*fib_p)
1225 kfree(*fib_p);
1226 ++fib_p;
1227 ++hw_fib_p;
1228 }
1229 if (hw_fib_pool)
1230 kfree(hw_fib_pool);
1231 if (fib_pool)
1232 kfree(fib_pool);
950 } 1233 }
951 spin_lock_irqsave(queues->queue[HostNormCmdQueue].lock, flags);
952 kfree(fib); 1234 kfree(fib);
1235 spin_lock_irqsave(dev->queues->queue[HostNormCmdQueue].lock, flags);
953 } 1236 }
954 /* 1237 /*
955 * There are no more AIF's 1238 * There are no more AIF's
956 */ 1239 */
957 spin_unlock_irqrestore(queues->queue[HostNormCmdQueue].lock, flags); 1240 spin_unlock_irqrestore(dev->queues->queue[HostNormCmdQueue].lock, flags);
958 schedule(); 1241 schedule();
959 1242
960 if(signal_pending(current)) 1243 if(signal_pending(current))
961 break; 1244 break;
962 set_current_state(TASK_INTERRUPTIBLE); 1245 set_current_state(TASK_INTERRUPTIBLE);
963 } 1246 }
964 remove_wait_queue(&queues->queue[HostNormCmdQueue].cmdready, &wait); 1247 if (dev->queues)
1248 remove_wait_queue(&dev->queues->queue[HostNormCmdQueue].cmdready, &wait);
965 dev->aif_thread = 0; 1249 dev->aif_thread = 0;
966 complete_and_exit(&dev->aif_completion, 0); 1250 complete_and_exit(&dev->aif_completion, 0);
1251 return 0;
967} 1252}
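After the consumers have taken (and NULLed) the pool entries they used, the leftovers are freed. A matching sketch of that cleanup:

#include <stdlib.h>

/* Free whatever is left in a pre-allocated pointer pool; entries already
 * handed out were set to NULL, and free(NULL) is a no-op. */
static void free_fib_pool(void **pool, unsigned num)
{
        unsigned i;

        if (!pool)
                return;
        for (i = 0; i < num; i++)
                free(pool[i]);
        free(pool);
}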
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 4ff29d7f5825..de8490a92831 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -748,7 +748,8 @@ static int __devinit aac_probe_one(struct pci_dev *pdev,
748 unique_id++; 748 unique_id++;
749 } 749 }
750 750
751 if (pci_enable_device(pdev)) 751 error = pci_enable_device(pdev);
752 if (error)
752 goto out; 753 goto out;
753 754
754 if (pci_set_dma_mask(pdev, 0xFFFFFFFFULL) || 755 if (pci_set_dma_mask(pdev, 0xFFFFFFFFULL) ||
@@ -772,6 +773,7 @@ static int __devinit aac_probe_one(struct pci_dev *pdev,
772 shost->irq = pdev->irq; 773 shost->irq = pdev->irq;
773 shost->base = pci_resource_start(pdev, 0); 774 shost->base = pci_resource_start(pdev, 0);
774 shost->unique_id = unique_id; 775 shost->unique_id = unique_id;
776 shost->max_cmd_len = 16;
775 777
776 aac = (struct aac_dev *)shost->hostdata; 778 aac = (struct aac_dev *)shost->hostdata;
777 aac->scsi_host_ptr = shost; 779 aac->scsi_host_ptr = shost;
@@ -799,7 +801,9 @@ static int __devinit aac_probe_one(struct pci_dev *pdev,
799 goto out_free_fibs; 801 goto out_free_fibs;
800 802
801 aac->maximum_num_channels = aac_drivers[index].channels; 803 aac->maximum_num_channels = aac_drivers[index].channels;
802 aac_get_adapter_info(aac); 804 error = aac_get_adapter_info(aac);
805 if (error < 0)
806 goto out_deinit;
803 807
804 /* 808 /*
805 * Lets override negotiations and drop the maximum SG limit to 34 809 * Lets override negotiations and drop the maximum SG limit to 34
@@ -927,8 +931,8 @@ static int __init aac_init(void)
927 printk(KERN_INFO "Adaptec %s driver (%s)\n", 931 printk(KERN_INFO "Adaptec %s driver (%s)\n",
928 AAC_DRIVERNAME, aac_driver_version); 932 AAC_DRIVERNAME, aac_driver_version);
929 933
930 error = pci_module_init(&aac_pci_driver); 934 error = pci_register_driver(&aac_pci_driver);
931 if (error) 935 if (error < 0)
932 return error; 936 return error;
933 937
934 aac_cfg_major = register_chrdev( 0, "aac", &aac_cfg_fops); 938 aac_cfg_major = register_chrdev( 0, "aac", &aac_cfg_fops);
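The linit.c changes make the probe path propagate errors from pci_enable_device() and aac_get_adapter_info() instead of ignoring them. A skeletal model of that check-and-propagate pattern, with placeholder step functions:

/* Placeholder steps standing in for pci_enable_device() and
 * aac_get_adapter_info(). */
static int enable_device(void)    { return 0; }
static int get_adapter_info(void) { return 0; }

static int probe_one(void)
{
        int error;

        error = enable_device();
        if (error)
                return error;

        error = get_adapter_info();
        if (error < 0)
                return error;       /* real driver jumps to out_deinit here */

        return 0;
}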
diff --git a/drivers/scsi/aic7xxx/aic7770_osm.c b/drivers/scsi/aic7xxx/aic7770_osm.c
index 70c5fb59c9ea..d754b3267863 100644
--- a/drivers/scsi/aic7xxx/aic7770_osm.c
+++ b/drivers/scsi/aic7xxx/aic7770_osm.c
@@ -112,6 +112,9 @@ aic7770_remove(struct device *dev)
112 struct ahc_softc *ahc = dev_get_drvdata(dev); 112 struct ahc_softc *ahc = dev_get_drvdata(dev);
113 u_long s; 113 u_long s;
114 114
115 if (ahc->platform_data && ahc->platform_data->host)
116 scsi_remove_host(ahc->platform_data->host);
117
115 ahc_lock(ahc, &s); 118 ahc_lock(ahc, &s);
116 ahc_intr_enable(ahc, FALSE); 119 ahc_intr_enable(ahc, FALSE);
117 ahc_unlock(ahc, &s); 120 ahc_unlock(ahc, &s);
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.c b/drivers/scsi/aic7xxx/aic79xx_osm.c
index 6b6d4e287793..95c285cc83e4 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm.c
+++ b/drivers/scsi/aic7xxx/aic79xx_osm.c
@@ -1192,11 +1192,6 @@ ahd_platform_free(struct ahd_softc *ahd)
1192 int i, j; 1192 int i, j;
1193 1193
1194 if (ahd->platform_data != NULL) { 1194 if (ahd->platform_data != NULL) {
1195 if (ahd->platform_data->host != NULL) {
1196 scsi_remove_host(ahd->platform_data->host);
1197 scsi_host_put(ahd->platform_data->host);
1198 }
1199
1200 /* destroy all of the device and target objects */ 1195 /* destroy all of the device and target objects */
1201 for (i = 0; i < AHD_NUM_TARGETS; i++) { 1196 for (i = 0; i < AHD_NUM_TARGETS; i++) {
1202 starget = ahd->platform_data->starget[i]; 1197 starget = ahd->platform_data->starget[i];
@@ -1226,6 +1221,9 @@ ahd_platform_free(struct ahd_softc *ahd)
1226 release_mem_region(ahd->platform_data->mem_busaddr, 1221 release_mem_region(ahd->platform_data->mem_busaddr,
1227 0x1000); 1222 0x1000);
1228 } 1223 }
1224 if (ahd->platform_data->host)
1225 scsi_host_put(ahd->platform_data->host);
1226
1229 free(ahd->platform_data, M_DEVBUF); 1227 free(ahd->platform_data, M_DEVBUF);
1230 } 1228 }
1231} 1229}
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm_pci.c b/drivers/scsi/aic7xxx/aic79xx_osm_pci.c
index 390b53852d4b..bf360ae021ab 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm_pci.c
+++ b/drivers/scsi/aic7xxx/aic79xx_osm_pci.c
@@ -95,6 +95,9 @@ ahd_linux_pci_dev_remove(struct pci_dev *pdev)
95 struct ahd_softc *ahd = pci_get_drvdata(pdev); 95 struct ahd_softc *ahd = pci_get_drvdata(pdev);
96 u_long s; 96 u_long s;
97 97
98 if (ahd->platform_data && ahd->platform_data->host)
99 scsi_remove_host(ahd->platform_data->host);
100
98 ahd_lock(ahd, &s); 101 ahd_lock(ahd, &s);
99 ahd_intr_enable(ahd, FALSE); 102 ahd_intr_enable(ahd, FALSE);
100 ahd_unlock(ahd, &s); 103 ahd_unlock(ahd, &s);
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.c b/drivers/scsi/aic7xxx/aic7xxx_osm.c
index 876d1de8480d..6ee1435d37fa 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm.c
@@ -1209,11 +1209,6 @@ ahc_platform_free(struct ahc_softc *ahc)
1209 int i, j; 1209 int i, j;
1210 1210
1211 if (ahc->platform_data != NULL) { 1211 if (ahc->platform_data != NULL) {
1212 if (ahc->platform_data->host != NULL) {
1213 scsi_remove_host(ahc->platform_data->host);
1214 scsi_host_put(ahc->platform_data->host);
1215 }
1216
1217 /* destroy all of the device and target objects */ 1212 /* destroy all of the device and target objects */
1218 for (i = 0; i < AHC_NUM_TARGETS; i++) { 1213 for (i = 0; i < AHC_NUM_TARGETS; i++) {
1219 starget = ahc->platform_data->starget[i]; 1214 starget = ahc->platform_data->starget[i];
@@ -1242,6 +1237,9 @@ ahc_platform_free(struct ahc_softc *ahc)
1242 0x1000); 1237 0x1000);
1243 } 1238 }
1244 1239
1240 if (ahc->platform_data->host)
1241 scsi_host_put(ahc->platform_data->host);
1242
1245 free(ahc->platform_data, M_DEVBUF); 1243 free(ahc->platform_data, M_DEVBUF);
1246 } 1244 }
1247} 1245}
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c b/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
index 3ce77ddc889e..cb30d9c1153d 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
@@ -143,6 +143,9 @@ ahc_linux_pci_dev_remove(struct pci_dev *pdev)
143 struct ahc_softc *ahc = pci_get_drvdata(pdev); 143 struct ahc_softc *ahc = pci_get_drvdata(pdev);
144 u_long s; 144 u_long s;
145 145
146 if (ahc->platform_data && ahc->platform_data->host)
147 scsi_remove_host(ahc->platform_data->host);
148
146 ahc_lock(ahc, &s); 149 ahc_lock(ahc, &s);
147 ahc_intr_enable(ahc, FALSE); 150 ahc_intr_enable(ahc, FALSE);
148 ahc_unlock(ahc, &s); 151 ahc_unlock(ahc, &s);
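Across the aic7xxx variants the teardown ordering is now: unregister the host in the bus remove callback (scsi_remove_host), and drop the final reference later in the platform free path (scsi_host_put). A toy refcounted model of that ordering:

#include <stdio.h>

struct host { int refs; int registered; };

static void host_unregister(struct host *h) { h->registered = 0; }

static void host_put(struct host *h)
{
        if (--h->refs == 0)
                printf("host freed\n");
}

/* Bus remove callback: unregister first, while the device is still alive. */
static void bus_remove(struct host *h)
{
        if (h && h->registered)
                host_unregister(h);     /* like scsi_remove_host() */
}

/* Platform free path: drop the last reference afterwards. */
static void platform_free(struct host *h)
{
        if (h)
                host_put(h);            /* like scsi_host_put() */
}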
diff --git a/drivers/scsi/ata_piix.c b/drivers/scsi/ata_piix.c
index 87e0c36f1554..d71cef767cec 100644
--- a/drivers/scsi/ata_piix.c
+++ b/drivers/scsi/ata_piix.c
@@ -442,7 +442,6 @@ static void piix_sata_phy_reset(struct ata_port *ap)
442 * piix_set_piomode - Initialize host controller PATA PIO timings 442 * piix_set_piomode - Initialize host controller PATA PIO timings
443 * @ap: Port whose timings we are configuring 443 * @ap: Port whose timings we are configuring
444 * @adev: um 444 * @adev: um
445 * @pio: PIO mode, 0 - 4
446 * 445 *
447 * Set PIO mode for device, in host controller PCI config space. 446 * Set PIO mode for device, in host controller PCI config space.
448 * 447 *
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index f2a72d33132c..02fe371b0ab8 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -176,6 +176,7 @@ void scsi_remove_host(struct Scsi_Host *shost)
176 transport_unregister_device(&shost->shost_gendev); 176 transport_unregister_device(&shost->shost_gendev);
177 class_device_unregister(&shost->shost_classdev); 177 class_device_unregister(&shost->shost_classdev);
178 device_del(&shost->shost_gendev); 178 device_del(&shost->shost_gendev);
179 scsi_proc_hostdir_rm(shost->hostt);
179} 180}
180EXPORT_SYMBOL(scsi_remove_host); 181EXPORT_SYMBOL(scsi_remove_host);
181 182
@@ -262,7 +263,6 @@ static void scsi_host_dev_release(struct device *dev)
262 if (shost->work_q) 263 if (shost->work_q)
263 destroy_workqueue(shost->work_q); 264 destroy_workqueue(shost->work_q);
264 265
265 scsi_proc_hostdir_rm(shost->hostt);
266 scsi_destroy_command_freelist(shost); 266 scsi_destroy_command_freelist(shost);
267 kfree(shost->shost_data); 267 kfree(shost->shost_data);
268 268
diff --git a/drivers/scsi/libata-core.c b/drivers/scsi/libata-core.c
index d92273cbe0de..e5b01997117a 100644
--- a/drivers/scsi/libata-core.c
+++ b/drivers/scsi/libata-core.c
@@ -4132,6 +4132,53 @@ err_out:
4132} 4132}
4133 4133
4134/** 4134/**
4135 * ata_host_set_remove - PCI layer callback for device removal
4136 * @host_set: ATA host set that was removed
4137 *
4138 * Unregister all objects associated with this host set. Free those
4139 * objects.
4140 *
4141 * LOCKING:
4142 * Inherited from calling layer (may sleep).
4143 */
4144
4145
4146void ata_host_set_remove(struct ata_host_set *host_set)
4147{
4148 struct ata_port *ap;
4149 unsigned int i;
4150
4151 for (i = 0; i < host_set->n_ports; i++) {
4152 ap = host_set->ports[i];
4153 scsi_remove_host(ap->host);
4154 }
4155
4156 free_irq(host_set->irq, host_set);
4157
4158 for (i = 0; i < host_set->n_ports; i++) {
4159 ap = host_set->ports[i];
4160
4161 ata_scsi_release(ap->host);
4162
4163 if ((ap->flags & ATA_FLAG_NO_LEGACY) == 0) {
4164 struct ata_ioports *ioaddr = &ap->ioaddr;
4165
4166 if (ioaddr->cmd_addr == 0x1f0)
4167 release_region(0x1f0, 8);
4168 else if (ioaddr->cmd_addr == 0x170)
4169 release_region(0x170, 8);
4170 }
4171
4172 scsi_host_put(ap->host);
4173 }
4174
4175 if (host_set->ops->host_stop)
4176 host_set->ops->host_stop(host_set);
4177
4178 kfree(host_set);
4179}
4180
4181/**
4135 * ata_scsi_release - SCSI layer callback hook for host unload 4182 * ata_scsi_release - SCSI layer callback hook for host unload
4136 * @host: libata host to be unloaded 4183 * @host: libata host to be unloaded
4137 * 4184 *
@@ -4471,39 +4518,8 @@ void ata_pci_remove_one (struct pci_dev *pdev)
4471{ 4518{
4472 struct device *dev = pci_dev_to_dev(pdev); 4519 struct device *dev = pci_dev_to_dev(pdev);
4473 struct ata_host_set *host_set = dev_get_drvdata(dev); 4520 struct ata_host_set *host_set = dev_get_drvdata(dev);
4474 struct ata_port *ap;
4475 unsigned int i;
4476
4477 for (i = 0; i < host_set->n_ports; i++) {
4478 ap = host_set->ports[i];
4479
4480 scsi_remove_host(ap->host);
4481 }
4482
4483 free_irq(host_set->irq, host_set);
4484
4485 for (i = 0; i < host_set->n_ports; i++) {
4486 ap = host_set->ports[i];
4487
4488 ata_scsi_release(ap->host);
4489
4490 if ((ap->flags & ATA_FLAG_NO_LEGACY) == 0) {
4491 struct ata_ioports *ioaddr = &ap->ioaddr;
4492
4493 if (ioaddr->cmd_addr == 0x1f0)
4494 release_region(0x1f0, 8);
4495 else if (ioaddr->cmd_addr == 0x170)
4496 release_region(0x170, 8);
4497 }
4498
4499 scsi_host_put(ap->host);
4500 }
4501
4502 if (host_set->ops->host_stop)
4503 host_set->ops->host_stop(host_set);
4504
4505 kfree(host_set);
4506 4521
4522 ata_host_set_remove(host_set);
4507 pci_release_regions(pdev); 4523 pci_release_regions(pdev);
4508 pci_disable_device(pdev); 4524 pci_disable_device(pdev);
4509 dev_set_drvdata(dev, NULL); 4525 dev_set_drvdata(dev, NULL);
@@ -4573,6 +4589,7 @@ module_exit(ata_exit);
4573EXPORT_SYMBOL_GPL(ata_std_bios_param); 4589EXPORT_SYMBOL_GPL(ata_std_bios_param);
4574EXPORT_SYMBOL_GPL(ata_std_ports); 4590EXPORT_SYMBOL_GPL(ata_std_ports);
4575EXPORT_SYMBOL_GPL(ata_device_add); 4591EXPORT_SYMBOL_GPL(ata_device_add);
4592EXPORT_SYMBOL_GPL(ata_host_set_remove);
4576EXPORT_SYMBOL_GPL(ata_sg_init); 4593EXPORT_SYMBOL_GPL(ata_sg_init);
4577EXPORT_SYMBOL_GPL(ata_sg_init_one); 4594EXPORT_SYMBOL_GPL(ata_sg_init_one);
4578EXPORT_SYMBOL_GPL(ata_qc_complete); 4595EXPORT_SYMBOL_GPL(ata_qc_complete);
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 86eaf6d408d5..acae7c48ef7d 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -973,10 +973,10 @@ lpfc_get_host_fabric_name (struct Scsi_Host *shost)
973 if ((phba->fc_flag & FC_FABRIC) || 973 if ((phba->fc_flag & FC_FABRIC) ||
974 ((phba->fc_topology == TOPOLOGY_LOOP) && 974 ((phba->fc_topology == TOPOLOGY_LOOP) &&
975 (phba->fc_flag & FC_PUBLIC_LOOP))) 975 (phba->fc_flag & FC_PUBLIC_LOOP)))
976 node_name = wwn_to_u64(phba->fc_fabparam.nodeName.wwn); 976 node_name = wwn_to_u64(phba->fc_fabparam.nodeName.u.wwn);
977 else 977 else
978 /* fabric is local port if there is no F/FL_Port */ 978 /* fabric is local port if there is no F/FL_Port */
979 node_name = wwn_to_u64(phba->fc_nodename.wwn); 979 node_name = wwn_to_u64(phba->fc_nodename.u.wwn);
980 980
981 spin_unlock_irq(shost->host_lock); 981 spin_unlock_irq(shost->host_lock);
982 982
@@ -1110,7 +1110,7 @@ lpfc_get_starget_node_name(struct scsi_target *starget)
1110 /* Search the mapped list for this target ID */ 1110 /* Search the mapped list for this target ID */
1111 list_for_each_entry(ndlp, &phba->fc_nlpmap_list, nlp_listp) { 1111 list_for_each_entry(ndlp, &phba->fc_nlpmap_list, nlp_listp) {
1112 if (starget->id == ndlp->nlp_sid) { 1112 if (starget->id == ndlp->nlp_sid) {
1113 node_name = wwn_to_u64(ndlp->nlp_nodename.wwn); 1113 node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
1114 break; 1114 break;
1115 } 1115 }
1116 } 1116 }
@@ -1131,7 +1131,7 @@ lpfc_get_starget_port_name(struct scsi_target *starget)
1131 /* Search the mapped list for this target ID */ 1131 /* Search the mapped list for this target ID */
1132 list_for_each_entry(ndlp, &phba->fc_nlpmap_list, nlp_listp) { 1132 list_for_each_entry(ndlp, &phba->fc_nlpmap_list, nlp_listp) {
1133 if (starget->id == ndlp->nlp_sid) { 1133 if (starget->id == ndlp->nlp_sid) {
1134 port_name = wwn_to_u64(ndlp->nlp_portname.wwn); 1134 port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
1135 break; 1135 break;
1136 } 1136 }
1137 } 1137 }
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 4fb8eb0c84cf..56052f4510c3 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -1019,8 +1019,8 @@ lpfc_register_remote_port(struct lpfc_hba * phba,
1019 struct fc_rport_identifiers rport_ids; 1019 struct fc_rport_identifiers rport_ids;
1020 1020
1021 /* Remote port has reappeared. Re-register w/ FC transport */ 1021 /* Remote port has reappeared. Re-register w/ FC transport */
1022 rport_ids.node_name = wwn_to_u64(ndlp->nlp_nodename.wwn); 1022 rport_ids.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
1023 rport_ids.port_name = wwn_to_u64(ndlp->nlp_portname.wwn); 1023 rport_ids.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
1024 rport_ids.port_id = ndlp->nlp_DID; 1024 rport_ids.port_id = ndlp->nlp_DID;
1025 rport_ids.roles = FC_RPORT_ROLE_UNKNOWN; 1025 rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
1026 if (ndlp->nlp_type & NLP_FCP_TARGET) 1026 if (ndlp->nlp_type & NLP_FCP_TARGET)
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 047a87c26cc0..86c41981188b 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -280,9 +280,9 @@ struct lpfc_name {
280#define NAME_CCITT_GR_TYPE 0xE 280#define NAME_CCITT_GR_TYPE 0xE
281 uint8_t IEEEextLsb; /* FC Word 0, bit 16:23, IEEE extended Lsb */ 281 uint8_t IEEEextLsb; /* FC Word 0, bit 16:23, IEEE extended Lsb */
282 uint8_t IEEE[6]; /* FC IEEE address */ 282 uint8_t IEEE[6]; /* FC IEEE address */
283 }; 283 } s;
284 uint8_t wwn[8]; 284 uint8_t wwn[8];
285 }; 285 } u;
286}; 286};
287 287
288struct csp { 288struct csp {
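Naming the previously anonymous members 's' and 'u' lets callers spell out accesses such as fc_nodename.u.wwn and fc_nodename.u.s.IEEE, as the follow-on hunks do. A rough sketch of the resulting shape, showing only the fields this patch touches:

#include <stdint.h>

/* Rough shape of struct lpfc_name after the change; leading name-type and
 * IEEE-extension fields are collapsed into a placeholder. */
struct lpfc_name_sketch {
        union {
                struct {
                        uint8_t hdr[2];     /* name type / IEEE extension */
                        uint8_t IEEE[6];    /* FC IEEE address */
                } s;
                uint8_t wwn[8];             /* raw world-wide name bytes */
        } u;
};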
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 454058f655db..0856ff7d3b33 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -285,7 +285,7 @@ lpfc_config_port_post(struct lpfc_hba * phba)
285 if (phba->SerialNumber[0] == 0) { 285 if (phba->SerialNumber[0] == 0) {
286 uint8_t *outptr; 286 uint8_t *outptr;
287 287
288 outptr = (uint8_t *) & phba->fc_nodename.IEEE[0]; 288 outptr = &phba->fc_nodename.u.s.IEEE[0];
289 for (i = 0; i < 12; i++) { 289 for (i = 0; i < 12; i++) {
290 status = *outptr++; 290 status = *outptr++;
291 j = ((status & 0xf0) >> 4); 291 j = ((status & 0xf0) >> 4);
@@ -1523,8 +1523,8 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
1523 * Must done after lpfc_sli_hba_setup() 1523 * Must done after lpfc_sli_hba_setup()
1524 */ 1524 */
1525 1525
1526 fc_host_node_name(host) = wwn_to_u64(phba->fc_nodename.wwn); 1526 fc_host_node_name(host) = wwn_to_u64(phba->fc_nodename.u.wwn);
1527 fc_host_port_name(host) = wwn_to_u64(phba->fc_portname.wwn); 1527 fc_host_port_name(host) = wwn_to_u64(phba->fc_portname.u.wwn);
1528 fc_host_supported_classes(host) = FC_COS_CLASS3; 1528 fc_host_supported_classes(host) = FC_COS_CLASS3;
1529 1529
1530 memset(fc_host_supported_fc4s(host), 0, 1530 memset(fc_host_supported_fc4s(host), 0,
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index 6f308ebe3e79..61a6fd810bb4 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -621,8 +621,6 @@ mega_build_cmd(adapter_t *adapter, Scsi_Cmnd *cmd, int *busy)
621 if(islogical) { 621 if(islogical) {
622 switch (cmd->cmnd[0]) { 622 switch (cmd->cmnd[0]) {
623 case TEST_UNIT_READY: 623 case TEST_UNIT_READY:
624 memset(cmd->request_buffer, 0, cmd->request_bufflen);
625
626#if MEGA_HAVE_CLUSTERING 624#if MEGA_HAVE_CLUSTERING
627 /* 625 /*
628 * Do we support clustering and is the support enabled 626 * Do we support clustering and is the support enabled
@@ -652,11 +650,28 @@ mega_build_cmd(adapter_t *adapter, Scsi_Cmnd *cmd, int *busy)
652 return NULL; 650 return NULL;
653#endif 651#endif
654 652
655 case MODE_SENSE: 653 case MODE_SENSE: {
654 char *buf;
655
656 if (cmd->use_sg) {
657 struct scatterlist *sg;
658
659 sg = (struct scatterlist *)cmd->request_buffer;
660 buf = kmap_atomic(sg->page, KM_IRQ0) +
661 sg->offset;
662 } else
663 buf = cmd->request_buffer;
656 memset(cmd->request_buffer, 0, cmd->cmnd[4]); 664 memset(cmd->request_buffer, 0, cmd->cmnd[4]);
665 if (cmd->use_sg) {
666 struct scatterlist *sg;
667
668 sg = (struct scatterlist *)cmd->request_buffer;
669 kunmap_atomic(buf - sg->offset, KM_IRQ0);
670 }
657 cmd->result = (DID_OK << 16); 671 cmd->result = (DID_OK << 16);
658 cmd->scsi_done(cmd); 672 cmd->scsi_done(cmd);
659 return NULL; 673 return NULL;
674 }
660 675
661 case READ_CAPACITY: 676 case READ_CAPACITY:
662 case INQUIRY: 677 case INQUIRY:
@@ -1685,14 +1700,23 @@ mega_rundoneq (adapter_t *adapter)
1685static void 1700static void
1686mega_free_scb(adapter_t *adapter, scb_t *scb) 1701mega_free_scb(adapter_t *adapter, scb_t *scb)
1687{ 1702{
1703 unsigned long length;
1704
1688 switch( scb->dma_type ) { 1705 switch( scb->dma_type ) {
1689 1706
1690 case MEGA_DMA_TYPE_NONE: 1707 case MEGA_DMA_TYPE_NONE:
1691 break; 1708 break;
1692 1709
1693 case MEGA_BULK_DATA: 1710 case MEGA_BULK_DATA:
1711 if (scb->cmd->use_sg == 0)
1712 length = scb->cmd->request_bufflen;
1713 else {
1714 struct scatterlist *sgl =
1715 (struct scatterlist *)scb->cmd->request_buffer;
1716 length = sgl->length;
1717 }
1694 pci_unmap_page(adapter->dev, scb->dma_h_bulkdata, 1718 pci_unmap_page(adapter->dev, scb->dma_h_bulkdata,
1695 scb->cmd->request_bufflen, scb->dma_direction); 1719 length, scb->dma_direction);
1696 break; 1720 break;
1697 1721
1698 case MEGA_SGLIST: 1722 case MEGA_SGLIST:
@@ -1741,6 +1765,7 @@ mega_build_sglist(adapter_t *adapter, scb_t *scb, u32 *buf, u32 *len)
1741 struct scatterlist *sgl; 1765 struct scatterlist *sgl;
1742 struct page *page; 1766 struct page *page;
1743 unsigned long offset; 1767 unsigned long offset;
1768 unsigned int length;
1744 Scsi_Cmnd *cmd; 1769 Scsi_Cmnd *cmd;
1745 int sgcnt; 1770 int sgcnt;
1746 int idx; 1771 int idx;
@@ -1748,14 +1773,23 @@ mega_build_sglist(adapter_t *adapter, scb_t *scb, u32 *buf, u32 *len)
1748 cmd = scb->cmd; 1773 cmd = scb->cmd;
1749 1774
1750 /* Scatter-gather not used */ 1775 /* Scatter-gather not used */
1751 if( !cmd->use_sg ) { 1776 if( cmd->use_sg == 0 || (cmd->use_sg == 1 &&
1752 1777 !adapter->has_64bit_addr)) {
1753 page = virt_to_page(cmd->request_buffer); 1778
1754 offset = offset_in_page(cmd->request_buffer); 1779 if (cmd->use_sg == 0) {
1780 page = virt_to_page(cmd->request_buffer);
1781 offset = offset_in_page(cmd->request_buffer);
1782 length = cmd->request_bufflen;
1783 } else {
1784 sgl = (struct scatterlist *)cmd->request_buffer;
1785 page = sgl->page;
1786 offset = sgl->offset;
1787 length = sgl->length;
1788 }
1755 1789
1756 scb->dma_h_bulkdata = pci_map_page(adapter->dev, 1790 scb->dma_h_bulkdata = pci_map_page(adapter->dev,
1757 page, offset, 1791 page, offset,
1758 cmd->request_bufflen, 1792 length,
1759 scb->dma_direction); 1793 scb->dma_direction);
1760 scb->dma_type = MEGA_BULK_DATA; 1794 scb->dma_type = MEGA_BULK_DATA;
1761 1795
@@ -1765,14 +1799,14 @@ mega_build_sglist(adapter_t *adapter, scb_t *scb, u32 *buf, u32 *len)
1765 */ 1799 */
1766 if( adapter->has_64bit_addr ) { 1800 if( adapter->has_64bit_addr ) {
1767 scb->sgl64[0].address = scb->dma_h_bulkdata; 1801 scb->sgl64[0].address = scb->dma_h_bulkdata;
1768 scb->sgl64[0].length = cmd->request_bufflen; 1802 scb->sgl64[0].length = length;
1769 *buf = (u32)scb->sgl_dma_addr; 1803 *buf = (u32)scb->sgl_dma_addr;
1770 *len = (u32)cmd->request_bufflen; 1804 *len = (u32)length;
1771 return 1; 1805 return 1;
1772 } 1806 }
1773 else { 1807 else {
1774 *buf = (u32)scb->dma_h_bulkdata; 1808 *buf = (u32)scb->dma_h_bulkdata;
1775 *len = (u32)cmd->request_bufflen; 1809 *len = (u32)length;
1776 } 1810 }
1777 return 0; 1811 return 0;
1778 } 1812 }
@@ -1791,27 +1825,23 @@ mega_build_sglist(adapter_t *adapter, scb_t *scb, u32 *buf, u32 *len)
1791 1825
1792 if( sgcnt > adapter->sglen ) BUG(); 1826 if( sgcnt > adapter->sglen ) BUG();
1793 1827
1828 *len = 0;
1829
1794 for( idx = 0; idx < sgcnt; idx++, sgl++ ) { 1830 for( idx = 0; idx < sgcnt; idx++, sgl++ ) {
1795 1831
1796 if( adapter->has_64bit_addr ) { 1832 if( adapter->has_64bit_addr ) {
1797 scb->sgl64[idx].address = sg_dma_address(sgl); 1833 scb->sgl64[idx].address = sg_dma_address(sgl);
1798 scb->sgl64[idx].length = sg_dma_len(sgl); 1834 *len += scb->sgl64[idx].length = sg_dma_len(sgl);
1799 } 1835 }
1800 else { 1836 else {
1801 scb->sgl[idx].address = sg_dma_address(sgl); 1837 scb->sgl[idx].address = sg_dma_address(sgl);
1802 scb->sgl[idx].length = sg_dma_len(sgl); 1838 *len += scb->sgl[idx].length = sg_dma_len(sgl);
1803 } 1839 }
1804 } 1840 }
1805 1841
1806 /* Reset pointer and length fields */ 1842 /* Reset pointer and length fields */
1807 *buf = scb->sgl_dma_addr; 1843 *buf = scb->sgl_dma_addr;
1808 1844
1809 /*
1810 * For passthru command, dataxferlen must be set, even for commands
1811 * with a sg list
1812 */
1813 *len = (u32)cmd->request_bufflen;
1814
1815 /* Return count of SG requests */ 1845 /* Return count of SG requests */
1816 return sgcnt; 1846 return sgcnt;
1817} 1847}
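mega_build_sglist() now accumulates the transfer length from the mapped scatter-gather segments instead of copying request_bufflen. A standalone model of that accumulation; struct sg_seg stands in for a mapped scatterlist entry:

#include <stdint.h>

struct sg_seg {
        uint32_t dma_addr;
        uint32_t dma_len;
};

/* Copy each segment's DMA address and length into the controller's SG
 * list and return the summed transfer length. */
static uint32_t build_sglist(const struct sg_seg *sgl, int sgcnt,
                             uint32_t *addrs, uint32_t *lens)
{
        uint32_t total = 0;
        int i;

        for (i = 0; i < sgcnt; i++) {
                addrs[i] = sgl[i].dma_addr;
                lens[i]  = sgl[i].dma_len;
                total   += sgl[i].dma_len;
        }
        return total;
}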
diff --git a/drivers/scsi/megaraid/Kconfig.megaraid b/drivers/scsi/megaraid/Kconfig.megaraid
index 917d591d90b2..7363e12663ac 100644
--- a/drivers/scsi/megaraid/Kconfig.megaraid
+++ b/drivers/scsi/megaraid/Kconfig.megaraid
@@ -76,3 +76,12 @@ config MEGARAID_LEGACY
76 To compile this driver as a module, choose M here: the 76 To compile this driver as a module, choose M here: the
77 module will be called megaraid 77 module will be called megaraid
78endif 78endif
79
80config MEGARAID_SAS
81 tristate "LSI Logic MegaRAID SAS RAID Module"
82 depends on PCI && SCSI
83 help
84 Module for LSI Logic's SAS based RAID controllers.
85 To compile this driver as a module, choose 'm' here.
86 Module will be called megaraid_sas
87
diff --git a/drivers/scsi/megaraid/Makefile b/drivers/scsi/megaraid/Makefile
index 6dd99f275722..f469915b97c3 100644
--- a/drivers/scsi/megaraid/Makefile
+++ b/drivers/scsi/megaraid/Makefile
@@ -1,2 +1,3 @@
1obj-$(CONFIG_MEGARAID_MM) += megaraid_mm.o 1obj-$(CONFIG_MEGARAID_MM) += megaraid_mm.o
2obj-$(CONFIG_MEGARAID_MAILBOX) += megaraid_mbox.o 2obj-$(CONFIG_MEGARAID_MAILBOX) += megaraid_mbox.o
3obj-$(CONFIG_MEGARAID_SAS) += megaraid_sas.o
diff --git a/drivers/scsi/megaraid/megaraid_sas.c b/drivers/scsi/megaraid/megaraid_sas.c
new file mode 100644
index 000000000000..1b3148e842af
--- /dev/null
+++ b/drivers/scsi/megaraid/megaraid_sas.c
@@ -0,0 +1,2805 @@
1/*
2 *
3 * Linux MegaRAID driver for SAS based RAID controllers
4 *
5 * Copyright (c) 2003-2005 LSI Logic Corporation.
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 *
12 * FILE : megaraid_sas.c
13 * Version : v00.00.02.00-rc4
14 *
15 * Authors:
16 * Sreenivas Bagalkote <Sreenivas.Bagalkote@lsil.com>
17 * Sumant Patro <Sumant.Patro@lsil.com>
18 *
19 * List of supported controllers
20 *
21 * OEM Product Name VID DID SSVID SSID
22 * --- ------------ --- --- ---- ----
23 */
24
25#include <linux/kernel.h>
26#include <linux/types.h>
27#include <linux/pci.h>
28#include <linux/list.h>
29#include <linux/version.h>
30#include <linux/moduleparam.h>
31#include <linux/module.h>
32#include <linux/spinlock.h>
33#include <linux/interrupt.h>
34#include <linux/delay.h>
35#include <linux/uio.h>
36#include <asm/uaccess.h>
37#include <linux/compat.h>
38
39#include <scsi/scsi.h>
40#include <scsi/scsi_cmnd.h>
41#include <scsi/scsi_device.h>
42#include <scsi/scsi_host.h>
43#include "megaraid_sas.h"
44
45MODULE_LICENSE("GPL");
46MODULE_VERSION(MEGASAS_VERSION);
47MODULE_AUTHOR("sreenivas.bagalkote@lsil.com");
48MODULE_DESCRIPTION("LSI Logic MegaRAID SAS Driver");
49
50/*
51 * PCI ID table for all supported controllers
52 */
53static struct pci_device_id megasas_pci_table[] = {
54
55 {
56 PCI_VENDOR_ID_LSI_LOGIC,
57 PCI_DEVICE_ID_LSI_SAS1064R,
58 PCI_ANY_ID,
59 PCI_ANY_ID,
60 },
61 {
62 PCI_VENDOR_ID_DELL,
63 PCI_DEVICE_ID_DELL_PERC5,
64 PCI_ANY_ID,
65 PCI_ANY_ID,
66 },
67 {0} /* Terminating entry */
68};
69
70MODULE_DEVICE_TABLE(pci, megasas_pci_table);
71
72static int megasas_mgmt_majorno;
73static struct megasas_mgmt_info megasas_mgmt_info;
74static struct fasync_struct *megasas_async_queue;
75static DECLARE_MUTEX(megasas_async_queue_mutex);
76
77/**
78 * megasas_get_cmd - Get a command from the free pool
79 * @instance: Adapter soft state
80 *
81 * Returns a free command from the pool
82 */
83static inline struct megasas_cmd *megasas_get_cmd(struct megasas_instance
84 *instance)
85{
86 unsigned long flags;
87 struct megasas_cmd *cmd = NULL;
88
89 spin_lock_irqsave(&instance->cmd_pool_lock, flags);
90
91 if (!list_empty(&instance->cmd_pool)) {
92 cmd = list_entry((&instance->cmd_pool)->next,
93 struct megasas_cmd, list);
94 list_del_init(&cmd->list);
95 } else {
96 printk(KERN_ERR "megasas: Command pool empty!\n");
97 }
98
99 spin_unlock_irqrestore(&instance->cmd_pool_lock, flags);
100 return cmd;
101}
102
103/**
104 * megasas_return_cmd - Return a cmd to free command pool
105 * @instance: Adapter soft state
106 * @cmd: Command packet to be returned to free command pool
107 */
108static inline void
109megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
110{
111 unsigned long flags;
112
113 spin_lock_irqsave(&instance->cmd_pool_lock, flags);
114
115 cmd->scmd = NULL;
116 list_add_tail(&cmd->list, &instance->cmd_pool);
117
118 spin_unlock_irqrestore(&instance->cmd_pool_lock, flags);
119}
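megasas_get_cmd() and megasas_return_cmd() implement a plain free list guarded by an IRQ-safe spinlock. A condensed, self-contained sketch of the same pattern in standard C, with a pthread mutex standing in for the spinlock and a singly linked list standing in for the kernel list_head:

#include <pthread.h>
#include <stddef.h>

struct cmd { struct cmd *next; };

static struct cmd *free_list;
static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;

/* Pop one command off the free list, or return NULL if the pool is empty. */
static struct cmd *pool_get(void)
{
	struct cmd *c;

	pthread_mutex_lock(&pool_lock);
	c = free_list;
	if (c)
		free_list = c->next;
	pthread_mutex_unlock(&pool_lock);
	return c;
}

/* Push a command back onto the free list. */
static void pool_put(struct cmd *c)
{
	pthread_mutex_lock(&pool_lock);
	c->next = free_list;
	free_list = c;
	pthread_mutex_unlock(&pool_lock);
}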
120
121/**
122 * megasas_enable_intr - Enables interrupts
123 * @regs: MFI register set
124 */
125static inline void
126megasas_enable_intr(struct megasas_register_set __iomem * regs)
127{
128 writel(1, &(regs)->outbound_intr_mask);
129
130 /* Dummy readl to force pci flush */
131 readl(&regs->outbound_intr_mask);
132}
133
134/**
135 * megasas_disable_intr - Disables interrupts
136 * @regs: MFI register set
137 */
138static inline void
139megasas_disable_intr(struct megasas_register_set __iomem * regs)
140{
141 u32 mask = readl(&regs->outbound_intr_mask) & (~0x00000001);
142 writel(mask, &regs->outbound_intr_mask);
143
144 /* Dummy readl to force pci flush */
145 readl(&regs->outbound_intr_mask);
146}
147
148/**
149 * megasas_issue_polled - Issues a polling command
150 * @instance: Adapter soft state
151 * @cmd: Command packet to be issued
152 *
153 * For polling, MFI requires the cmd_status to be set to 0xFF before posting.
154 */
155static int
156megasas_issue_polled(struct megasas_instance *instance, struct megasas_cmd *cmd)
157{
158 int i;
159 u32 msecs = MFI_POLL_TIMEOUT_SECS * 1000;
160
161 struct megasas_header *frame_hdr = &cmd->frame->hdr;
162
163 frame_hdr->cmd_status = 0xFF;
164 frame_hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
165
166 /*
167 * Issue the frame using inbound queue port
168 */
169 writel(cmd->frame_phys_addr >> 3,
170 &instance->reg_set->inbound_queue_port);
171
172 /*
173 * Wait for cmd_status to change
174 */
175 for (i = 0; (i < msecs) && (frame_hdr->cmd_status == 0xff); i++) {
176 rmb();
177 msleep(1);
178 }
179
180 if (frame_hdr->cmd_status == 0xff)
181 return -ETIME;
182
183 return 0;
184}
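The polled path relies on 0xFF being an impossible completion status: the driver writes 0xFF into cmd_status, posts the frame, and waits for the firmware to overwrite the field. A stripped-down sketch of that wait loop, assuming a DMA-visible status byte and the kernel's msleep() helper that the driver itself uses:

/* Returns 0 once *status leaves the 0xFF sentinel, -1 on timeout.
 * timeout_ms mirrors MFI_POLL_TIMEOUT_SECS * 1000 with 1 ms granularity. */
static int wait_for_status(volatile unsigned char *status, int timeout_ms)
{
	int i;

	for (i = 0; i < timeout_ms && *status == 0xff; i++)
		msleep(1);		/* sleep rather than busy-spin */

	return (*status == 0xff) ? -1 : 0;
}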
185
186/**
187 * megasas_issue_blocked_cmd - Synchronous wrapper around regular FW cmds
188 * @instance: Adapter soft state
189 * @cmd: Command to be issued
190 *
191 * This function waits on an event for the command to be returned from ISR.
192 * Used to issue ioctl commands.
193 */
194static int
195megasas_issue_blocked_cmd(struct megasas_instance *instance,
196 struct megasas_cmd *cmd)
197{
198 cmd->cmd_status = ENODATA;
199
200 writel(cmd->frame_phys_addr >> 3,
201 &instance->reg_set->inbound_queue_port);
202
203 wait_event(instance->int_cmd_wait_q, (cmd->cmd_status != ENODATA));
204
205 return 0;
206}
207
208/**
209 * megasas_issue_blocked_abort_cmd - Aborts previously issued cmd
210 * @instance: Adapter soft state
211 * @cmd_to_abort: Previously issued cmd to be aborted
212 *
213 * MFI firmware can abort a previously issued AEN command (automatic event
214 * notification). The megasas_issue_blocked_abort_cmd() issues such abort
215 * cmd and blocks till it is completed.
216 */
217static int
218megasas_issue_blocked_abort_cmd(struct megasas_instance *instance,
219 struct megasas_cmd *cmd_to_abort)
220{
221 struct megasas_cmd *cmd;
222 struct megasas_abort_frame *abort_fr;
223
224 cmd = megasas_get_cmd(instance);
225
226 if (!cmd)
227 return -1;
228
229 abort_fr = &cmd->frame->abort;
230
231 /*
232 * Prepare and issue the abort frame
233 */
234 abort_fr->cmd = MFI_CMD_ABORT;
235 abort_fr->cmd_status = 0xFF;
236 abort_fr->flags = 0;
237 abort_fr->abort_context = cmd_to_abort->index;
238 abort_fr->abort_mfi_phys_addr_lo = cmd_to_abort->frame_phys_addr;
239 abort_fr->abort_mfi_phys_addr_hi = 0;
240
241 cmd->sync_cmd = 1;
242 cmd->cmd_status = 0xFF;
243
244 writel(cmd->frame_phys_addr >> 3,
245 &instance->reg_set->inbound_queue_port);
246
247 /*
248 * Wait for this cmd to complete
249 */
250 wait_event(instance->abort_cmd_wait_q, (cmd->cmd_status != 0xFF));
251
252 megasas_return_cmd(instance, cmd);
253 return 0;
254}
255
256/**
257 * megasas_make_sgl32 - Prepares 32-bit SGL
258 * @instance: Adapter soft state
259 * @scp: SCSI command from the mid-layer
260 * @mfi_sgl: SGL to be filled in
261 *
262 * If successful, this function returns the number of SG elements. Otherwise,
263 * it returns -1.
264 */
265static inline int
266megasas_make_sgl32(struct megasas_instance *instance, struct scsi_cmnd *scp,
267 union megasas_sgl *mfi_sgl)
268{
269 int i;
270 int sge_count;
271 struct scatterlist *os_sgl;
272
273 /*
274 * Return 0 if there is no data transfer
275 */
276 if (!scp->request_buffer || !scp->request_bufflen)
277 return 0;
278
279 if (!scp->use_sg) {
280 mfi_sgl->sge32[0].phys_addr = pci_map_single(instance->pdev,
281 scp->
282 request_buffer,
283 scp->
284 request_bufflen,
285 scp->
286 sc_data_direction);
287 mfi_sgl->sge32[0].length = scp->request_bufflen;
288
289 return 1;
290 }
291
292 os_sgl = (struct scatterlist *)scp->request_buffer;
293 sge_count = pci_map_sg(instance->pdev, os_sgl, scp->use_sg,
294 scp->sc_data_direction);
295
296 for (i = 0; i < sge_count; i++, os_sgl++) {
297 mfi_sgl->sge32[i].length = sg_dma_len(os_sgl);
298 mfi_sgl->sge32[i].phys_addr = sg_dma_address(os_sgl);
299 }
300
301 return sge_count;
302}
303
304/**
305 * megasas_make_sgl64 - Prepares 64-bit SGL
306 * @instance: Adapter soft state
307 * @scp: SCSI command from the mid-layer
308 * @mfi_sgl: SGL to be filled in
309 *
310 * If successful, this function returns the number of SG elements. Otherwise,
311 * it returns -1.
312 */
313static inline int
314megasas_make_sgl64(struct megasas_instance *instance, struct scsi_cmnd *scp,
315 union megasas_sgl *mfi_sgl)
316{
317 int i;
318 int sge_count;
319 struct scatterlist *os_sgl;
320
321 /*
322 * Return 0 if there is no data transfer
323 */
324 if (!scp->request_buffer || !scp->request_bufflen)
325 return 0;
326
327 if (!scp->use_sg) {
328 mfi_sgl->sge64[0].phys_addr = pci_map_single(instance->pdev,
329 scp->
330 request_buffer,
331 scp->
332 request_bufflen,
333 scp->
334 sc_data_direction);
335
336 mfi_sgl->sge64[0].length = scp->request_bufflen;
337
338 return 1;
339 }
340
341 os_sgl = (struct scatterlist *)scp->request_buffer;
342 sge_count = pci_map_sg(instance->pdev, os_sgl, scp->use_sg,
343 scp->sc_data_direction);
344
345 for (i = 0; i < sge_count; i++, os_sgl++) {
346 mfi_sgl->sge64[i].length = sg_dma_len(os_sgl);
347 mfi_sgl->sge64[i].phys_addr = sg_dma_address(os_sgl);
348 }
349
350 return sge_count;
351}
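Both SGL builders follow the same shape: map the mid-layer scatterlist, then copy each mapped element's DMA address and length into the firmware's SGE array; only the SGE width (32 vs. 64 bit addresses) differs. A condensed sketch with hypothetical types:

struct sge { unsigned long long addr; unsigned int len; };

/* Copy 'count' already-mapped scatterlist entries into the firmware SGE
 * array and report the element count back to the caller. */
static int fill_sgl(struct sge *fw_sgl, const struct sge *mapped, int count)
{
	int i;

	for (i = 0; i < count; i++) {
		fw_sgl[i].addr = mapped[i].addr;
		fw_sgl[i].len  = mapped[i].len;
	}
	return count;		/* becomes the frame's sge_count */
}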
352
353/**
354 * megasas_build_dcdb - Prepares a direct cdb (DCDB) command
355 * @instance: Adapter soft state
356 * @scp: SCSI command
357 * @cmd: Command to be prepared in
358 *
359 * This function prepares CDB commands. These are typically pass-through
360 * commands to the devices.
361 */
362static inline int
363megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp,
364 struct megasas_cmd *cmd)
365{
366 u32 sge_sz;
367 int sge_bytes;
368 u32 is_logical;
369 u32 device_id;
370 u16 flags = 0;
371 struct megasas_pthru_frame *pthru;
372
373 is_logical = MEGASAS_IS_LOGICAL(scp);
374 device_id = MEGASAS_DEV_INDEX(instance, scp);
375 pthru = (struct megasas_pthru_frame *)cmd->frame;
376
377 if (scp->sc_data_direction == PCI_DMA_TODEVICE)
378 flags = MFI_FRAME_DIR_WRITE;
379 else if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
380 flags = MFI_FRAME_DIR_READ;
381 else if (scp->sc_data_direction == PCI_DMA_NONE)
382 flags = MFI_FRAME_DIR_NONE;
383
384 /*
385 * Prepare the DCDB frame
386 */
387 pthru->cmd = (is_logical) ? MFI_CMD_LD_SCSI_IO : MFI_CMD_PD_SCSI_IO;
388 pthru->cmd_status = 0x0;
389 pthru->scsi_status = 0x0;
390 pthru->target_id = device_id;
391 pthru->lun = scp->device->lun;
392 pthru->cdb_len = scp->cmd_len;
393 pthru->timeout = 0;
394 pthru->flags = flags;
395 pthru->data_xfer_len = scp->request_bufflen;
396
397 memcpy(pthru->cdb, scp->cmnd, scp->cmd_len);
398
399 /*
400 * Construct SGL
401 */
402 sge_sz = (IS_DMA64) ? sizeof(struct megasas_sge64) :
403 sizeof(struct megasas_sge32);
404
405 if (IS_DMA64) {
406 pthru->flags |= MFI_FRAME_SGL64;
407 pthru->sge_count = megasas_make_sgl64(instance, scp,
408 &pthru->sgl);
409 } else
410 pthru->sge_count = megasas_make_sgl32(instance, scp,
411 &pthru->sgl);
412
413 /*
414 * Sense info specific
415 */
416 pthru->sense_len = SCSI_SENSE_BUFFERSIZE;
417 pthru->sense_buf_phys_addr_hi = 0;
418 pthru->sense_buf_phys_addr_lo = cmd->sense_phys_addr;
419
420 sge_bytes = sge_sz * pthru->sge_count;
421
422 /*
423 * Compute the total number of frames this command consumes. FW uses
424 * this number to pull sufficient number of frames from host memory.
425 */
426 cmd->frame_count = (sge_bytes / MEGAMFI_FRAME_SIZE) +
427 ((sge_bytes % MEGAMFI_FRAME_SIZE) ? 1 : 0) + 1;
428
429 if (cmd->frame_count > 7)
430 cmd->frame_count = 8;
431
432 return cmd->frame_count;
433}
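The frame-count arithmetic at the end of the function is a ceiling division of the SGL bytes by the 64-byte MFI frame size, plus one frame for the command itself, capped at 8. The same computation, isolated into a small sketch (FRAME_SIZE stands in for MEGAMFI_FRAME_SIZE):

#define FRAME_SIZE 64	/* stands in for MEGAMFI_FRAME_SIZE */

/* Frames consumed by a command: one for the MFI frame plus enough 64-byte
 * frames to hold sge_bytes of SGL data, never more than 8 in total. */
static int frame_count(int sge_bytes)
{
	int n = (sge_bytes + FRAME_SIZE - 1) / FRAME_SIZE + 1;

	return (n > 7) ? 8 : n;
}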
434
435/**
436 * megasas_build_ldio - Prepares IOs to logical devices
437 * @instance: Adapter soft state
438 * @scp: SCSI command
439 * @cmd: Command to be prepared
440 *
441 * Frames (and accompanying SGLs) for regular SCSI IOs use this function.
442 */
443static inline int
444megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp,
445 struct megasas_cmd *cmd)
446{
447 u32 sge_sz;
448 int sge_bytes;
449 u32 device_id;
450 u8 sc = scp->cmnd[0];
451 u16 flags = 0;
452 struct megasas_io_frame *ldio;
453
454 device_id = MEGASAS_DEV_INDEX(instance, scp);
455 ldio = (struct megasas_io_frame *)cmd->frame;
456
457 if (scp->sc_data_direction == PCI_DMA_TODEVICE)
458 flags = MFI_FRAME_DIR_WRITE;
459 else if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
460 flags = MFI_FRAME_DIR_READ;
461
462 /*
463 * Prepare the logical IO frame: the 2nd bit is zero for all read cmds
464 */
465 ldio->cmd = (sc & 0x02) ? MFI_CMD_LD_WRITE : MFI_CMD_LD_READ;
466 ldio->cmd_status = 0x0;
467 ldio->scsi_status = 0x0;
468 ldio->target_id = device_id;
469 ldio->timeout = 0;
470 ldio->reserved_0 = 0;
471 ldio->pad_0 = 0;
472 ldio->flags = flags;
473 ldio->start_lba_hi = 0;
474 ldio->access_byte = (scp->cmd_len != 6) ? scp->cmnd[1] : 0;
475
476 /*
477 * 6-byte READ(0x08) or WRITE(0x0A) cdb
478 */
479 if (scp->cmd_len == 6) {
480 ldio->lba_count = (u32) scp->cmnd[4];
481 ldio->start_lba_lo = ((u32) scp->cmnd[1] << 16) |
482 ((u32) scp->cmnd[2] << 8) | (u32) scp->cmnd[3];
483
484 ldio->start_lba_lo &= 0x1FFFFF;
485 }
486
487 /*
488 * 10-byte READ(0x28) or WRITE(0x2A) cdb
489 */
490 else if (scp->cmd_len == 10) {
491 ldio->lba_count = (u32) scp->cmnd[8] |
492 ((u32) scp->cmnd[7] << 8);
493 ldio->start_lba_lo = ((u32) scp->cmnd[2] << 24) |
494 ((u32) scp->cmnd[3] << 16) |
495 ((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5];
496 }
497
498 /*
499 * 12-byte READ(0xA8) or WRITE(0xAA) cdb
500 */
501 else if (scp->cmd_len == 12) {
502 ldio->lba_count = ((u32) scp->cmnd[6] << 24) |
503 ((u32) scp->cmnd[7] << 16) |
504 ((u32) scp->cmnd[8] << 8) | (u32) scp->cmnd[9];
505
506 ldio->start_lba_lo = ((u32) scp->cmnd[2] << 24) |
507 ((u32) scp->cmnd[3] << 16) |
508 ((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5];
509 }
510
511 /*
512 * 16-byte READ(0x88) or WRITE(0x8A) cdb
513 */
514 else if (scp->cmd_len == 16) {
515 ldio->lba_count = ((u32) scp->cmnd[10] << 24) |
516 ((u32) scp->cmnd[11] << 16) |
517 ((u32) scp->cmnd[12] << 8) | (u32) scp->cmnd[13];
518
519 ldio->start_lba_lo = ((u32) scp->cmnd[6] << 24) |
520 ((u32) scp->cmnd[7] << 16) |
521 ((u32) scp->cmnd[8] << 8) | (u32) scp->cmnd[9];
522
523 ldio->start_lba_hi = ((u32) scp->cmnd[2] << 24) |
524 ((u32) scp->cmnd[3] << 16) |
525 ((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5];
526
527 }
528
529 /*
530 * Construct SGL
531 */
532 sge_sz = (IS_DMA64) ? sizeof(struct megasas_sge64) :
533 sizeof(struct megasas_sge32);
534
535 if (IS_DMA64) {
536 ldio->flags |= MFI_FRAME_SGL64;
537 ldio->sge_count = megasas_make_sgl64(instance, scp, &ldio->sgl);
538 } else
539 ldio->sge_count = megasas_make_sgl32(instance, scp, &ldio->sgl);
540
541 /*
542 * Sense info specific
543 */
544 ldio->sense_len = SCSI_SENSE_BUFFERSIZE;
545 ldio->sense_buf_phys_addr_hi = 0;
546 ldio->sense_buf_phys_addr_lo = cmd->sense_phys_addr;
547
548 sge_bytes = sge_sz * ldio->sge_count;
549
550 cmd->frame_count = (sge_bytes / MEGAMFI_FRAME_SIZE) +
551 ((sge_bytes % MEGAMFI_FRAME_SIZE) ? 1 : 0) + 1;
552
553 if (cmd->frame_count > 7)
554 cmd->frame_count = 8;
555
556 return cmd->frame_count;
557}
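megasas_build_ldio() decodes the starting LBA and sector count directly from the CDB; the byte layout depends on the CDB length (6, 10, 12 or 16 bytes). A standalone sketch of the 10-byte READ(10)/WRITE(10) case, which is the most common:

#include <stdint.h>

/* Decode the LBA and transfer length from a 10-byte READ(10)/WRITE(10) CDB. */
static void decode_rw10(const uint8_t *cdb, uint32_t *lba, uint32_t *count)
{
	*lba   = ((uint32_t)cdb[2] << 24) | ((uint32_t)cdb[3] << 16) |
		 ((uint32_t)cdb[4] << 8)  |  (uint32_t)cdb[5];
	*count = ((uint32_t)cdb[7] << 8)  |  (uint32_t)cdb[8];
}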
558
559/**
560 * megasas_build_cmd - Prepares a command packet
561 * @instance: Adapter soft state
562 * @scp: SCSI command
563 * @frame_count: [OUT] Number of frames used to prepare this command
564 */
565static inline struct megasas_cmd *megasas_build_cmd(struct megasas_instance
566 *instance,
567 struct scsi_cmnd *scp,
568 int *frame_count)
569{
570 u32 logical_cmd;
571 struct megasas_cmd *cmd;
572
573 /*
574 * Find out if this is logical or physical drive command.
575 */
576 logical_cmd = MEGASAS_IS_LOGICAL(scp);
577
578 /*
579 * Logical drive command
580 */
581 if (logical_cmd) {
582
583 if (scp->device->id >= MEGASAS_MAX_LD) {
584 scp->result = DID_BAD_TARGET << 16;
585 return NULL;
586 }
587
588 switch (scp->cmnd[0]) {
589
590 case READ_10:
591 case WRITE_10:
592 case READ_12:
593 case WRITE_12:
594 case READ_6:
595 case WRITE_6:
596 case READ_16:
597 case WRITE_16:
598 /*
599 * Fail for LUN > 0
600 */
601 if (scp->device->lun) {
602 scp->result = DID_BAD_TARGET << 16;
603 return NULL;
604 }
605
606 cmd = megasas_get_cmd(instance);
607
608 if (!cmd) {
609 scp->result = DID_IMM_RETRY << 16;
610 return NULL;
611 }
612
613 *frame_count = megasas_build_ldio(instance, scp, cmd);
614
615 if (!(*frame_count)) {
616 megasas_return_cmd(instance, cmd);
617 return NULL;
618 }
619
620 return cmd;
621
622 default:
623 /*
624 * Fail for LUN > 0
625 */
626 if (scp->device->lun) {
627 scp->result = DID_BAD_TARGET << 16;
628 return NULL;
629 }
630
631 cmd = megasas_get_cmd(instance);
632
633 if (!cmd) {
634 scp->result = DID_IMM_RETRY << 16;
635 return NULL;
636 }
637
638 *frame_count = megasas_build_dcdb(instance, scp, cmd);
639
640 if (!(*frame_count)) {
641 megasas_return_cmd(instance, cmd);
642 return NULL;
643 }
644
645 return cmd;
646 }
647 } else {
648 cmd = megasas_get_cmd(instance);
649
650 if (!cmd) {
651 scp->result = DID_IMM_RETRY << 16;
652 return NULL;
653 }
654
655 *frame_count = megasas_build_dcdb(instance, scp, cmd);
656
657 if (!(*frame_count)) {
658 megasas_return_cmd(instance, cmd);
659 return NULL;
660 }
661
662 return cmd;
663 }
664
665 return NULL;
666}
667
668/**
669 * megasas_queue_command - Queue entry point
670 * @scmd: SCSI command to be queued
671 * @done: Callback entry point
672 */
673static int
674megasas_queue_command(struct scsi_cmnd *scmd, void (*done) (struct scsi_cmnd *))
675{
676 u32 frame_count;
677 unsigned long flags;
678 struct megasas_cmd *cmd;
679 struct megasas_instance *instance;
680
681 instance = (struct megasas_instance *)
682 scmd->device->host->hostdata;
683 scmd->scsi_done = done;
684 scmd->result = 0;
685
686 cmd = megasas_build_cmd(instance, scmd, &frame_count);
687
688 if (!cmd) {
689 done(scmd);
690 return 0;
691 }
692
693 cmd->scmd = scmd;
694 scmd->SCp.ptr = (char *)cmd;
695 scmd->SCp.sent_command = jiffies;
696
697 /*
698 * Issue the command to the FW
699 */
700 spin_lock_irqsave(&instance->instance_lock, flags);
701 instance->fw_outstanding++;
702 spin_unlock_irqrestore(&instance->instance_lock, flags);
703
704 writel(((cmd->frame_phys_addr >> 3) | (cmd->frame_count - 1)),
705 &instance->reg_set->inbound_queue_port);
706
707 return 0;
708}
709
710/**
711 * megasas_wait_for_outstanding - Wait for all outstanding cmds
712 * @instance: Adapter soft state
713 *
714 * This function waits for up to MEGASAS_RESET_WAIT_TIME seconds for the FW to
715 * complete all its outstanding commands. Returns error if one or more IOs
716 * are pending after this time period. It also marks the controller dead.
717 */
718static int megasas_wait_for_outstanding(struct megasas_instance *instance)
719{
720 int i;
721 u32 wait_time = MEGASAS_RESET_WAIT_TIME;
722
723 for (i = 0; i < wait_time; i++) {
724
725 if (!instance->fw_outstanding)
726 break;
727
728 if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
729 printk(KERN_NOTICE "megasas: [%2d]waiting for %d "
730 "commands to complete\n", i,
731 instance->fw_outstanding);
732 }
733
734 msleep(1000);
735 }
736
737 if (instance->fw_outstanding) {
738 instance->hw_crit_error = 1;
739 return FAILED;
740 }
741
742 return SUCCESS;
743}
744
745/**
746 * megasas_generic_reset - Generic reset routine
747 * @scmd: Mid-layer SCSI command
748 *
749 * This routine implements a generic reset handler for device, bus and host
750 * reset requests. Device, bus and host specific reset handlers can use this
751 * function after they do their specific tasks.
752 */
753static int megasas_generic_reset(struct scsi_cmnd *scmd)
754{
755 int ret_val;
756 struct megasas_instance *instance;
757
758 instance = (struct megasas_instance *)scmd->device->host->hostdata;
759
760 printk(KERN_NOTICE "megasas: RESET -%ld cmd=%x <c=%d t=%d l=%d>\n",
761 scmd->serial_number, scmd->cmnd[0], scmd->device->channel,
762 scmd->device->id, scmd->device->lun);
763
764 if (instance->hw_crit_error) {
765 printk(KERN_ERR "megasas: cannot recover from previous reset "
766 "failures\n");
767 return FAILED;
768 }
769
770 spin_unlock(scmd->device->host->host_lock);
771
772 ret_val = megasas_wait_for_outstanding(instance);
773
774 if (ret_val == SUCCESS)
775 printk(KERN_NOTICE "megasas: reset successful \n");
776 else
777 printk(KERN_ERR "megasas: failed to do reset\n");
778
779 spin_lock(scmd->device->host->host_lock);
780
781 return ret_val;
782}
783
784static enum scsi_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd)
785{
786 unsigned long seconds;
787
788 if (scmd->SCp.ptr) {
789 seconds = (jiffies - scmd->SCp.sent_command) / HZ;
790
791 if (seconds < 90) {
792 return EH_RESET_TIMER;
793 } else {
794 return EH_NOT_HANDLED;
795 }
796 }
797
798 return EH_HANDLED;
799}
800
801/**
802 * megasas_reset_device - Device reset handler entry point
803 */
804static int megasas_reset_device(struct scsi_cmnd *scmd)
805{
806 int ret;
807
808 /*
809 * First wait for all commands to complete
810 */
811 ret = megasas_generic_reset(scmd);
812
813 return ret;
814}
815
816/**
817 * megasas_reset_bus_host - Bus & host reset handler entry point
818 */
819static int megasas_reset_bus_host(struct scsi_cmnd *scmd)
820{
821 int ret;
822
823 /*
824 * First wait for all commands to complete
825 */
826 ret = megasas_generic_reset(scmd);
827
828 return ret;
829}
830
831/**
832 * megasas_service_aen - Processes an event notification
833 * @instance: Adapter soft state
834 * @cmd: AEN command completed by the ISR
835 *
836 * For AEN, driver sends a command down to FW that is held by the FW till an
837 * event occurs. When an event of interest occurs, FW completes the command
838 * that it was previously holding.
839 *
840 * This routine sends a SIGIO signal to processes that have registered with the
841 * driver for AEN.
842 */
843static void
844megasas_service_aen(struct megasas_instance *instance, struct megasas_cmd *cmd)
845{
846 /*
847 * Don't signal app if it is just an aborted previously registered aen
848 */
849 if (!cmd->abort_aen)
850 kill_fasync(&megasas_async_queue, SIGIO, POLL_IN);
851 else
852 cmd->abort_aen = 0;
853
854 instance->aen_cmd = NULL;
855 megasas_return_cmd(instance, cmd);
856}
857
858/*
859 * Scsi host template for megaraid_sas driver
860 */
861static struct scsi_host_template megasas_template = {
862
863 .module = THIS_MODULE,
864 .name = "LSI Logic SAS based MegaRAID driver",
865 .proc_name = "megaraid_sas",
866 .queuecommand = megasas_queue_command,
867 .eh_device_reset_handler = megasas_reset_device,
868 .eh_bus_reset_handler = megasas_reset_bus_host,
869 .eh_host_reset_handler = megasas_reset_bus_host,
870 .eh_timed_out = megasas_reset_timer,
871 .use_clustering = ENABLE_CLUSTERING,
872};
873
874/**
875 * megasas_complete_int_cmd - Completes an internal command
876 * @instance: Adapter soft state
877 * @cmd: Command to be completed
878 *
879 * The megasas_issue_blocked_cmd() function waits for a command to complete
880 * after it issues a command. This function wakes up that waiting routine by
881 * calling wake_up() on the wait queue.
882 */
883static void
884megasas_complete_int_cmd(struct megasas_instance *instance,
885 struct megasas_cmd *cmd)
886{
887 cmd->cmd_status = cmd->frame->io.cmd_status;
888
889 if (cmd->cmd_status == ENODATA) {
890 cmd->cmd_status = 0;
891 }
892 wake_up(&instance->int_cmd_wait_q);
893}
894
895/**
896 * megasas_complete_abort - Completes aborting a command
897 * @instance: Adapter soft state
898 * @cmd: Cmd that was issued to abort another cmd
899 *
900 * The megasas_issue_blocked_abort_cmd() function waits on abort_cmd_wait_q
901 * after it issues an abort on a previously issued command. This function
902 * wakes up all functions waiting on the same wait queue.
903 */
904static void
905megasas_complete_abort(struct megasas_instance *instance,
906 struct megasas_cmd *cmd)
907{
908 if (cmd->sync_cmd) {
909 cmd->sync_cmd = 0;
910 cmd->cmd_status = 0;
911 wake_up(&instance->abort_cmd_wait_q);
912 }
913
914 return;
915}
916
917/**
918 * megasas_unmap_sgbuf - Unmap SG buffers
919 * @instance: Adapter soft state
920 * @cmd: Completed command
921 */
922static inline void
923megasas_unmap_sgbuf(struct megasas_instance *instance, struct megasas_cmd *cmd)
924{
925 dma_addr_t buf_h;
926 u8 opcode;
927
928 if (cmd->scmd->use_sg) {
929 pci_unmap_sg(instance->pdev, cmd->scmd->request_buffer,
930 cmd->scmd->use_sg, cmd->scmd->sc_data_direction);
931 return;
932 }
933
934 if (!cmd->scmd->request_bufflen)
935 return;
936
937 opcode = cmd->frame->hdr.cmd;
938
939 if ((opcode == MFI_CMD_LD_READ) || (opcode == MFI_CMD_LD_WRITE)) {
940 if (IS_DMA64)
941 buf_h = cmd->frame->io.sgl.sge64[0].phys_addr;
942 else
943 buf_h = cmd->frame->io.sgl.sge32[0].phys_addr;
944 } else {
945 if (IS_DMA64)
946 buf_h = cmd->frame->pthru.sgl.sge64[0].phys_addr;
947 else
948 buf_h = cmd->frame->pthru.sgl.sge32[0].phys_addr;
949 }
950
951 pci_unmap_single(instance->pdev, buf_h, cmd->scmd->request_bufflen,
952 cmd->scmd->sc_data_direction);
953 return;
954}
955
956/**
957 * megasas_complete_cmd - Completes a command
958 * @instance: Adapter soft state
959 * @cmd: Command to be completed
960 * @alt_status: If non-zero, use this value as status to
961 * SCSI mid-layer instead of the value returned
962 * by the FW. This should be used if caller wants
963 * an alternate status (as in the case of aborted
964 * commands)
965 */
966static inline void
967megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
968 u8 alt_status)
969{
970 int exception = 0;
971 struct megasas_header *hdr = &cmd->frame->hdr;
972 unsigned long flags;
973
974 if (cmd->scmd) {
975 cmd->scmd->SCp.ptr = (char *)0;
976 }
977
978 switch (hdr->cmd) {
979
980 case MFI_CMD_PD_SCSI_IO:
981 case MFI_CMD_LD_SCSI_IO:
982
983 /*
984 * MFI_CMD_PD_SCSI_IO and MFI_CMD_LD_SCSI_IO could have been
985 * issued either through an IO path or an IOCTL path. If it
986 * was via IOCTL, we will send it to internal completion.
987 */
988 if (cmd->sync_cmd) {
989 cmd->sync_cmd = 0;
990 megasas_complete_int_cmd(instance, cmd);
991 break;
992 }
993
994 /*
995 * Don't export physical disk devices to mid-layer.
996 */
997 if (!MEGASAS_IS_LOGICAL(cmd->scmd) &&
998 (hdr->cmd_status == MFI_STAT_OK) &&
999 (cmd->scmd->cmnd[0] == INQUIRY)) {
1000
1001 if (((*(u8 *) cmd->scmd->request_buffer) & 0x1F) ==
1002 TYPE_DISK) {
1003 cmd->scmd->result = DID_BAD_TARGET << 16;
1004 exception = 1;
1005 }
1006 }
1007
1008 case MFI_CMD_LD_READ:
1009 case MFI_CMD_LD_WRITE:
1010
1011 if (alt_status) {
1012 cmd->scmd->result = alt_status << 16;
1013 exception = 1;
1014 }
1015
1016 if (exception) {
1017
1018 spin_lock_irqsave(&instance->instance_lock, flags);
1019 instance->fw_outstanding--;
1020 spin_unlock_irqrestore(&instance->instance_lock, flags);
1021
1022 megasas_unmap_sgbuf(instance, cmd);
1023 cmd->scmd->scsi_done(cmd->scmd);
1024 megasas_return_cmd(instance, cmd);
1025
1026 break;
1027 }
1028
1029 switch (hdr->cmd_status) {
1030
1031 case MFI_STAT_OK:
1032 cmd->scmd->result = DID_OK << 16;
1033 break;
1034
1035 case MFI_STAT_SCSI_IO_FAILED:
1036 case MFI_STAT_LD_INIT_IN_PROGRESS:
1037 cmd->scmd->result =
1038 (DID_ERROR << 16) | hdr->scsi_status;
1039 break;
1040
1041 case MFI_STAT_SCSI_DONE_WITH_ERROR:
1042
1043 cmd->scmd->result = (DID_OK << 16) | hdr->scsi_status;
1044
1045 if (hdr->scsi_status == SAM_STAT_CHECK_CONDITION) {
1046 memset(cmd->scmd->sense_buffer, 0,
1047 SCSI_SENSE_BUFFERSIZE);
1048 memcpy(cmd->scmd->sense_buffer, cmd->sense,
1049 hdr->sense_len);
1050
1051 cmd->scmd->result |= DRIVER_SENSE << 24;
1052 }
1053
1054 break;
1055
1056 case MFI_STAT_LD_OFFLINE:
1057 case MFI_STAT_DEVICE_NOT_FOUND:
1058 cmd->scmd->result = DID_BAD_TARGET << 16;
1059 break;
1060
1061 default:
1062 printk(KERN_DEBUG "megasas: MFI FW status %#x\n",
1063 hdr->cmd_status);
1064 cmd->scmd->result = DID_ERROR << 16;
1065 break;
1066 }
1067
1068 spin_lock_irqsave(&instance->instance_lock, flags);
1069 instance->fw_outstanding--;
1070 spin_unlock_irqrestore(&instance->instance_lock, flags);
1071
1072 megasas_unmap_sgbuf(instance, cmd);
1073 cmd->scmd->scsi_done(cmd->scmd);
1074 megasas_return_cmd(instance, cmd);
1075
1076 break;
1077
1078 case MFI_CMD_SMP:
1079 case MFI_CMD_STP:
1080 case MFI_CMD_DCMD:
1081
1082 /*
1083 * See if got an event notification
1084 */
1085 if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_WAIT)
1086 megasas_service_aen(instance, cmd);
1087 else
1088 megasas_complete_int_cmd(instance, cmd);
1089
1090 break;
1091
1092 case MFI_CMD_ABORT:
1093 /*
1094 * Cmd issued to abort another cmd returned
1095 */
1096 megasas_complete_abort(instance, cmd);
1097 break;
1098
1099 default:
1100 printk("megasas: Unknown command completed! [0x%X]\n",
1101 hdr->cmd);
1102 break;
1103 }
1104}
1105
1106/**
1107 * megasas_deplete_reply_queue - Processes all completed commands
1108 * @instance: Adapter soft state
1109 * @alt_status: Alternate status to be returned to
1110 * SCSI mid-layer instead of the status
1111 * returned by the FW
1112 */
1113static inline int
1114megasas_deplete_reply_queue(struct megasas_instance *instance, u8 alt_status)
1115{
1116 u32 status;
1117 u32 producer;
1118 u32 consumer;
1119 u32 context;
1120 struct megasas_cmd *cmd;
1121
1122 /*
1123 * Check if it is our interrupt
1124 */
1125 status = readl(&instance->reg_set->outbound_intr_status);
1126
1127 if (!(status & MFI_OB_INTR_STATUS_MASK)) {
1128 return IRQ_NONE;
1129 }
1130
1131 /*
1132 * Clear the interrupt by writing back the same value
1133 */
1134 writel(status, &instance->reg_set->outbound_intr_status);
1135
1136 producer = *instance->producer;
1137 consumer = *instance->consumer;
1138
1139 while (consumer != producer) {
1140 context = instance->reply_queue[consumer];
1141
1142 cmd = instance->cmd_list[context];
1143
1144 megasas_complete_cmd(instance, cmd, alt_status);
1145
1146 consumer++;
1147 if (consumer == (instance->max_fw_cmds + 1)) {
1148 consumer = 0;
1149 }
1150 }
1151
1152 *instance->consumer = producer;
1153
1154 return IRQ_HANDLED;
1155}
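The reply queue is a classic single-producer/single-consumer ring: the firmware writes completed contexts and advances the producer index, and the ISR walks from consumer to producer before publishing the new consumer value. The core of that loop, reduced to plain C with a hypothetical completion callback:

/* Drain a reply ring of 'size' entries (size == max_fw_cmds + 1).
 * complete() is a hypothetical per-context completion callback. */
static void drain_ring(const unsigned int *ring, unsigned int size,
		       unsigned int *consumer, unsigned int producer,
		       void (*complete)(unsigned int context))
{
	unsigned int c = *consumer;

	while (c != producer) {
		complete(ring[c]);	/* context indexes cmd_list[] */
		if (++c == size)
			c = 0;		/* wrap around the ring       */
	}
	*consumer = producer;		/* publish progress to the FW */
}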
1156
1157/**
1158 * megasas_isr - isr entry point
1159 */
1160static irqreturn_t megasas_isr(int irq, void *devp, struct pt_regs *regs)
1161{
1162 return megasas_deplete_reply_queue((struct megasas_instance *)devp,
1163 DID_OK);
1164}
1165
1166/**
1167 * megasas_transition_to_ready - Move the FW to READY state
1168 * @reg_set: MFI register set
1169 *
1170 * During initialization, the FW can potentially be in any one of several
1171 * possible states. If the FW is in the operational or waiting-for-handshake
1172 * states, the driver must take steps to bring it to the ready state.
1173 * Otherwise, it simply waits for the FW to reach the ready state.
1174 */
1175static int
1176megasas_transition_to_ready(struct megasas_register_set __iomem * reg_set)
1177{
1178 int i;
1179 u8 max_wait;
1180 u32 fw_state;
1181 u32 cur_state;
1182
1183 fw_state = readl(&reg_set->outbound_msg_0) & MFI_STATE_MASK;
1184
1185 while (fw_state != MFI_STATE_READY) {
1186
1187 printk(KERN_INFO "megasas: Waiting for FW to come to ready"
1188 " state\n");
1189 switch (fw_state) {
1190
1191 case MFI_STATE_FAULT:
1192
1193 printk(KERN_DEBUG "megasas: FW in FAULT state!!\n");
1194 return -ENODEV;
1195
1196 case MFI_STATE_WAIT_HANDSHAKE:
1197 /*
1198 * Set the CLR bit in inbound doorbell
1199 */
1200 writel(MFI_INIT_CLEAR_HANDSHAKE,
1201 &reg_set->inbound_doorbell);
1202
1203 max_wait = 2;
1204 cur_state = MFI_STATE_WAIT_HANDSHAKE;
1205 break;
1206
1207 case MFI_STATE_OPERATIONAL:
1208 /*
1209 * Bring it to READY state; assuming max wait 2 secs
1210 */
1211 megasas_disable_intr(reg_set);
1212 writel(MFI_INIT_READY, &reg_set->inbound_doorbell);
1213
1214 max_wait = 10;
1215 cur_state = MFI_STATE_OPERATIONAL;
1216 break;
1217
1218 case MFI_STATE_UNDEFINED:
1219 /*
1220 * This state should not last for more than 2 seconds
1221 */
1222 max_wait = 2;
1223 cur_state = MFI_STATE_UNDEFINED;
1224 break;
1225
1226 case MFI_STATE_BB_INIT:
1227 max_wait = 2;
1228 cur_state = MFI_STATE_BB_INIT;
1229 break;
1230
1231 case MFI_STATE_FW_INIT:
1232 max_wait = 20;
1233 cur_state = MFI_STATE_FW_INIT;
1234 break;
1235
1236 case MFI_STATE_FW_INIT_2:
1237 max_wait = 20;
1238 cur_state = MFI_STATE_FW_INIT_2;
1239 break;
1240
1241 case MFI_STATE_DEVICE_SCAN:
1242 max_wait = 20;
1243 cur_state = MFI_STATE_DEVICE_SCAN;
1244 break;
1245
1246 case MFI_STATE_FLUSH_CACHE:
1247 max_wait = 20;
1248 cur_state = MFI_STATE_FLUSH_CACHE;
1249 break;
1250
1251 default:
1252 printk(KERN_DEBUG "megasas: Unknown state 0x%x\n",
1253 fw_state);
1254 return -ENODEV;
1255 }
1256
1257 /*
1258 * The cur_state should not last for more than max_wait secs
1259 */
1260 for (i = 0; i < (max_wait * 1000); i++) {
1261 fw_state = MFI_STATE_MASK &
1262 readl(&reg_set->outbound_msg_0);
1263
1264 if (fw_state == cur_state) {
1265 msleep(1);
1266 } else
1267 break;
1268 }
1269
1270 /*
1271 * Return error if fw_state hasn't changed after max_wait
1272 */
1273 if (fw_state == cur_state) {
1274 printk(KERN_DEBUG "FW state [%d] hasn't changed "
1275 "in %d secs\n", fw_state, max_wait);
1276 return -ENODEV;
1277 }
1278 };
1279
1280 return 0;
1281}
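megasas_transition_to_ready() is a small state machine: read the current state, poke the inbound doorbell if a transition must be triggered, then poll for up to max_wait seconds for the state to change. The polling step in isolation, assuming a hypothetical read_state() accessor for the masked outbound_msg_0 value and the kernel's msleep():

/* Poll until the FW leaves 'cur_state' or max_wait seconds elapse.
 * Returns the new state, or cur_state itself on timeout. */
static unsigned int wait_state_change(unsigned int (*read_state)(void),
				      unsigned int cur_state, int max_wait)
{
	int i;
	unsigned int state = cur_state;

	for (i = 0; i < max_wait * 1000; i++) {
		state = read_state();
		if (state != cur_state)
			break;
		msleep(1);
	}
	return state;
}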
1282
1283/**
1284 * megasas_teardown_frame_pool - Destroy the cmd frame DMA pool
1285 * @instance: Adapter soft state
1286 */
1287static void megasas_teardown_frame_pool(struct megasas_instance *instance)
1288{
1289 int i;
1290 u32 max_cmd = instance->max_fw_cmds;
1291 struct megasas_cmd *cmd;
1292
1293 if (!instance->frame_dma_pool)
1294 return;
1295
1296 /*
1297 * Return all frames to pool
1298 */
1299 for (i = 0; i < max_cmd; i++) {
1300
1301 cmd = instance->cmd_list[i];
1302
1303 if (cmd->frame)
1304 pci_pool_free(instance->frame_dma_pool, cmd->frame,
1305 cmd->frame_phys_addr);
1306
1307 if (cmd->sense)
1308 pci_pool_free(instance->sense_dma_pool, cmd->frame,
1309 cmd->sense_phys_addr);
1310 }
1311
1312 /*
1313 * Now destroy the pool itself
1314 */
1315 pci_pool_destroy(instance->frame_dma_pool);
1316 pci_pool_destroy(instance->sense_dma_pool);
1317
1318 instance->frame_dma_pool = NULL;
1319 instance->sense_dma_pool = NULL;
1320}
1321
1322/**
1323 * megasas_create_frame_pool - Creates DMA pool for cmd frames
1324 * @instance: Adapter soft state
1325 *
1326 * Each command packet has an embedded DMA memory buffer that is used for
1327 * filling the MFI frame and the SG list that immediately follows the frame. This
1328 * function creates those DMA memory buffers for each command packet by using
1329 * PCI pool facility.
1330 */
1331static int megasas_create_frame_pool(struct megasas_instance *instance)
1332{
1333 int i;
1334 u32 max_cmd;
1335 u32 sge_sz;
1336 u32 sgl_sz;
1337 u32 total_sz;
1338 u32 frame_count;
1339 struct megasas_cmd *cmd;
1340
1341 max_cmd = instance->max_fw_cmds;
1342
1343 /*
1344 * Size of our frame is 64 bytes for MFI frame, followed by max SG
1345 * elements and finally SCSI_SENSE_BUFFERSIZE bytes for sense buffer
1346 */
1347 sge_sz = (IS_DMA64) ? sizeof(struct megasas_sge64) :
1348 sizeof(struct megasas_sge32);
1349
1350 /*
1351 * Calculated the number of 64byte frames required for SGL
1352 */
1353 sgl_sz = sge_sz * instance->max_num_sge;
1354 frame_count = (sgl_sz + MEGAMFI_FRAME_SIZE - 1) / MEGAMFI_FRAME_SIZE;
1355
1356 /*
1357 * We need one extra frame for the MFI command
1358 */
1359 frame_count++;
1360
1361 total_sz = MEGAMFI_FRAME_SIZE * frame_count;
1362 /*
1363 * Use DMA pool facility provided by PCI layer
1364 */
1365 instance->frame_dma_pool = pci_pool_create("megasas frame pool",
1366 instance->pdev, total_sz, 64,
1367 0);
1368
1369 if (!instance->frame_dma_pool) {
1370 printk(KERN_DEBUG "megasas: failed to setup frame pool\n");
1371 return -ENOMEM;
1372 }
1373
1374 instance->sense_dma_pool = pci_pool_create("megasas sense pool",
1375 instance->pdev, 128, 4, 0);
1376
1377 if (!instance->sense_dma_pool) {
1378 printk(KERN_DEBUG "megasas: failed to setup sense pool\n");
1379
1380 pci_pool_destroy(instance->frame_dma_pool);
1381 instance->frame_dma_pool = NULL;
1382
1383 return -ENOMEM;
1384 }
1385
1386 /*
1387 * Allocate and attach a frame to each of the commands in cmd_list.
1388 * By making cmd->index as the context instead of the &cmd, we can
1389 * always use 32bit context regardless of the architecture
1390 */
1391 for (i = 0; i < max_cmd; i++) {
1392
1393 cmd = instance->cmd_list[i];
1394
1395 cmd->frame = pci_pool_alloc(instance->frame_dma_pool,
1396 GFP_KERNEL, &cmd->frame_phys_addr);
1397
1398 cmd->sense = pci_pool_alloc(instance->sense_dma_pool,
1399 GFP_KERNEL, &cmd->sense_phys_addr);
1400
1401 /*
1402 * megasas_teardown_frame_pool() takes care of freeing
1403 * whatever has been allocated
1404 */
1405 if (!cmd->frame || !cmd->sense) {
1406 printk(KERN_DEBUG "megasas: pci_pool_alloc failed \n");
1407 megasas_teardown_frame_pool(instance);
1408 return -ENOMEM;
1409 }
1410
1411 cmd->frame->io.context = cmd->index;
1412 }
1413
1414 return 0;
1415}
1416
1417/**
1418 * megasas_free_cmds - Free all the cmds in the free cmd pool
1419 * @instance: Adapter soft state
1420 */
1421static void megasas_free_cmds(struct megasas_instance *instance)
1422{
1423 int i;
1424 /* First free the MFI frame pool */
1425 megasas_teardown_frame_pool(instance);
1426
1427 /* Free all the commands in the cmd_list */
1428 for (i = 0; i < instance->max_fw_cmds; i++)
1429 kfree(instance->cmd_list[i]);
1430
1431 /* Free the cmd_list buffer itself */
1432 kfree(instance->cmd_list);
1433 instance->cmd_list = NULL;
1434
1435 INIT_LIST_HEAD(&instance->cmd_pool);
1436}
1437
1438/**
1439 * megasas_alloc_cmds - Allocates the command packets
1440 * @instance: Adapter soft state
1441 *
1442 * Each command that is issued to the FW, whether IO commands from the OS or
1443 * internal commands like IOCTLs, are wrapped in local data structure called
1444 * megasas_cmd. The frame embedded in this megasas_cmd is actually issued to
1445 * the FW.
1446 *
1447 * Each frame has a 32-bit field called context (tag). This context is used
1448 * to get back the megasas_cmd from the frame when a frame gets completed in
1449 * the ISR. Typically the address of the megasas_cmd itself would be used as
1450 * the context. But we wanted to keep the differences between 32 and 64 bit
1451 * systems to the minimum. We always use 32 bit integers for the context. In
1452 * this driver, the 32 bit values are the indices into an array cmd_list.
1453 * This array is used only to look up the megasas_cmd given the context. The
1454 * free commands themselves are maintained in a linked list called cmd_pool.
1455 */
1456static int megasas_alloc_cmds(struct megasas_instance *instance)
1457{
1458 int i;
1459 int j;
1460 u32 max_cmd;
1461 struct megasas_cmd *cmd;
1462
1463 max_cmd = instance->max_fw_cmds;
1464
1465 /*
1466 * instance->cmd_list is an array of struct megasas_cmd pointers.
1467 * Allocate the dynamic array first and then allocate individual
1468 * commands.
1469 */
1470 instance->cmd_list = kmalloc(sizeof(struct megasas_cmd *) * max_cmd,
1471 GFP_KERNEL);
1472
1473 if (!instance->cmd_list) {
1474 printk(KERN_DEBUG "megasas: out of memory\n");
1475 return -ENOMEM;
1476 }
1477
1478 memset(instance->cmd_list, 0, sizeof(struct megasas_cmd *) * max_cmd);
1479
1480 for (i = 0; i < max_cmd; i++) {
1481 instance->cmd_list[i] = kmalloc(sizeof(struct megasas_cmd),
1482 GFP_KERNEL);
1483
1484 if (!instance->cmd_list[i]) {
1485
1486 for (j = 0; j < i; j++)
1487 kfree(instance->cmd_list[j]);
1488
1489 kfree(instance->cmd_list);
1490 instance->cmd_list = NULL;
1491
1492 return -ENOMEM;
1493 }
1494 }
1495
1496 /*
1497 * Add all the commands to command pool (instance->cmd_pool)
1498 */
1499 for (i = 0; i < max_cmd; i++) {
1500 cmd = instance->cmd_list[i];
1501 memset(cmd, 0, sizeof(struct megasas_cmd));
1502 cmd->index = i;
1503 cmd->instance = instance;
1504
1505 list_add_tail(&cmd->list, &instance->cmd_pool);
1506 }
1507
1508 /*
1509 * Create a frame pool and assign one frame to each cmd
1510 */
1511 if (megasas_create_frame_pool(instance)) {
1512 printk(KERN_DEBUG "megasas: Error creating frame DMA pool\n");
1513 megasas_free_cmds(instance);
1514 }
1515
1516 return 0;
1517}
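The context trick described above keeps the FW-visible tag a plain 32-bit index rather than a pointer, so the scheme works identically on 32- and 64-bit hosts. Looking up a completed command is then a bounds-checked array access; a small sketch (the bounds check is an added safety illustration, not something the ISR itself performs):

/* Translate a 32-bit completion context back into a command pointer.
 * Returns NULL for an out-of-range context. */
static struct megasas_cmd *cmd_from_context(struct megasas_cmd **cmd_list,
					    unsigned int max_cmds,
					    unsigned int context)
{
	if (context >= max_cmds)
		return NULL;
	return cmd_list[context];
}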
1518
1519/**
1520 * megasas_get_ctrl_info - Returns the FW's controller structure
1521 * @instance: Adapter soft state
1522 * @ctrl_info: Controller information structure
1523 *
1524 * Issues an internal command (DCMD) to get the FW's controller structure.
1525 * This information is mainly used to find out the maximum IO transfer per
1526 * command supported by the FW.
1527 */
1528static int
1529megasas_get_ctrl_info(struct megasas_instance *instance,
1530 struct megasas_ctrl_info *ctrl_info)
1531{
1532 int ret = 0;
1533 struct megasas_cmd *cmd;
1534 struct megasas_dcmd_frame *dcmd;
1535 struct megasas_ctrl_info *ci;
1536 dma_addr_t ci_h = 0;
1537
1538 cmd = megasas_get_cmd(instance);
1539
1540 if (!cmd) {
1541 printk(KERN_DEBUG "megasas: Failed to get a free cmd\n");
1542 return -ENOMEM;
1543 }
1544
1545 dcmd = &cmd->frame->dcmd;
1546
1547 ci = pci_alloc_consistent(instance->pdev,
1548 sizeof(struct megasas_ctrl_info), &ci_h);
1549
1550 if (!ci) {
1551 printk(KERN_DEBUG "Failed to alloc mem for ctrl info\n");
1552 megasas_return_cmd(instance, cmd);
1553 return -ENOMEM;
1554 }
1555
1556 memset(ci, 0, sizeof(*ci));
1557 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
1558
1559 dcmd->cmd = MFI_CMD_DCMD;
1560 dcmd->cmd_status = 0xFF;
1561 dcmd->sge_count = 1;
1562 dcmd->flags = MFI_FRAME_DIR_READ;
1563 dcmd->timeout = 0;
1564 dcmd->data_xfer_len = sizeof(struct megasas_ctrl_info);
1565 dcmd->opcode = MR_DCMD_CTRL_GET_INFO;
1566 dcmd->sgl.sge32[0].phys_addr = ci_h;
1567 dcmd->sgl.sge32[0].length = sizeof(struct megasas_ctrl_info);
1568
1569 if (!megasas_issue_polled(instance, cmd)) {
1570 ret = 0;
1571 memcpy(ctrl_info, ci, sizeof(struct megasas_ctrl_info));
1572 } else {
1573 ret = -1;
1574 }
1575
1576 pci_free_consistent(instance->pdev, sizeof(struct megasas_ctrl_info),
1577 ci, ci_h);
1578
1579 megasas_return_cmd(instance, cmd);
1580 return ret;
1581}
1582
1583/**
1584 * megasas_init_mfi - Initializes the FW
1585 * @instance: Adapter soft state
1586 *
1587 * This is the main function for initializing MFI firmware.
1588 */
1589static int megasas_init_mfi(struct megasas_instance *instance)
1590{
1591 u32 context_sz;
1592 u32 reply_q_sz;
1593 u32 max_sectors_1;
1594 u32 max_sectors_2;
1595 struct megasas_register_set __iomem *reg_set;
1596
1597 struct megasas_cmd *cmd;
1598 struct megasas_ctrl_info *ctrl_info;
1599
1600 struct megasas_init_frame *init_frame;
1601 struct megasas_init_queue_info *initq_info;
1602 dma_addr_t init_frame_h;
1603 dma_addr_t initq_info_h;
1604
1605 /*
1606 * Map the message registers
1607 */
1608 instance->base_addr = pci_resource_start(instance->pdev, 0);
1609
1610 if (pci_request_regions(instance->pdev, "megasas: LSI Logic")) {
1611 printk(KERN_DEBUG "megasas: IO memory region busy!\n");
1612 return -EBUSY;
1613 }
1614
1615 instance->reg_set = ioremap_nocache(instance->base_addr, 8192);
1616
1617 if (!instance->reg_set) {
1618 printk(KERN_DEBUG "megasas: Failed to map IO mem\n");
1619 goto fail_ioremap;
1620 }
1621
1622 reg_set = instance->reg_set;
1623
1624 /*
1625 * We expect the FW state to be READY
1626 */
1627 if (megasas_transition_to_ready(instance->reg_set))
1628 goto fail_ready_state;
1629
1630 /*
1631 * Get various operational parameters from status register
1632 */
1633 instance->max_fw_cmds = readl(&reg_set->outbound_msg_0) & 0x00FFFF;
1634 instance->max_num_sge = (readl(&reg_set->outbound_msg_0) & 0xFF0000) >>
1635 0x10;
1636 /*
1637 * Create a pool of commands
1638 */
1639 if (megasas_alloc_cmds(instance))
1640 goto fail_alloc_cmds;
1641
1642 /*
1643 * Allocate memory for reply queue. Length of reply queue should
1644 * be _one_ more than the maximum commands handled by the firmware.
1645 *
1646 * Note: When FW completes commands, it places corresponding context
1647 * values in this circular reply queue. This circular queue is a fairly
1648 * typical producer-consumer queue. FW is the producer (of completed
1649 * commands) and the driver is the consumer.
1650 */
1651 context_sz = sizeof(u32);
1652 reply_q_sz = context_sz * (instance->max_fw_cmds + 1);
1653
1654 instance->reply_queue = pci_alloc_consistent(instance->pdev,
1655 reply_q_sz,
1656 &instance->reply_queue_h);
1657
1658 if (!instance->reply_queue) {
1659 printk(KERN_DEBUG "megasas: Out of DMA mem for reply queue\n");
1660 goto fail_reply_queue;
1661 }
1662
1663 /*
1664 * Prepare a init frame. Note the init frame points to queue info
1665 * structure. Each frame has SGL allocated after first 64 bytes. For
1666 * this frame - since we don't need any SGL - we use SGL's space as
1667 * queue info structure
1668 *
1669 * We will not get a NULL command below. We just created the pool.
1670 */
1671 cmd = megasas_get_cmd(instance);
1672
1673 init_frame = (struct megasas_init_frame *)cmd->frame;
1674 initq_info = (struct megasas_init_queue_info *)
1675 ((unsigned long)init_frame + 64);
1676
1677 init_frame_h = cmd->frame_phys_addr;
1678 initq_info_h = init_frame_h + 64;
1679
1680 memset(init_frame, 0, MEGAMFI_FRAME_SIZE);
1681 memset(initq_info, 0, sizeof(struct megasas_init_queue_info));
1682
1683 initq_info->reply_queue_entries = instance->max_fw_cmds + 1;
1684 initq_info->reply_queue_start_phys_addr_lo = instance->reply_queue_h;
1685
1686 initq_info->producer_index_phys_addr_lo = instance->producer_h;
1687 initq_info->consumer_index_phys_addr_lo = instance->consumer_h;
1688
1689 init_frame->cmd = MFI_CMD_INIT;
1690 init_frame->cmd_status = 0xFF;
1691 init_frame->queue_info_new_phys_addr_lo = initq_info_h;
1692
1693 init_frame->data_xfer_len = sizeof(struct megasas_init_queue_info);
1694
1695 /*
1696 * Issue the init frame in polled mode
1697 */
1698 if (megasas_issue_polled(instance, cmd)) {
1699 printk(KERN_DEBUG "megasas: Failed to init firmware\n");
1700 goto fail_fw_init;
1701 }
1702
1703 megasas_return_cmd(instance, cmd);
1704
1705 ctrl_info = kmalloc(sizeof(struct megasas_ctrl_info), GFP_KERNEL);
1706
1707 /*
1708 * Compute the max allowed sectors per IO: The controller info has two
1709 * limits on max sectors. Driver should use the minimum of these two.
1710 *
1711 * 1 << stripe_sz_ops.min = max sectors per strip
1712 *
1713 * Note that older firmwares ( < FW ver 30) didn't report information
1714 * to calculate max_sectors_1. So the number ended up as zero always.
1715 */
1716 if (ctrl_info && !megasas_get_ctrl_info(instance, ctrl_info)) {
1717
1718 max_sectors_1 = (1 << ctrl_info->stripe_sz_ops.min) *
1719 ctrl_info->max_strips_per_io;
1720 max_sectors_2 = ctrl_info->max_request_size;
1721
1722 instance->max_sectors_per_req = (max_sectors_1 < max_sectors_2)
1723 ? max_sectors_1 : max_sectors_2;
1724 } else
1725 instance->max_sectors_per_req = instance->max_num_sge *
1726 PAGE_SIZE / 512;
1727
1728 kfree(ctrl_info);
1729
1730 return 0;
1731
1732 fail_fw_init:
1733 megasas_return_cmd(instance, cmd);
1734
1735 pci_free_consistent(instance->pdev, reply_q_sz,
1736 instance->reply_queue, instance->reply_queue_h);
1737 fail_reply_queue:
1738 megasas_free_cmds(instance);
1739
1740 fail_alloc_cmds:
1741 fail_ready_state:
1742 iounmap(instance->reg_set);
1743
1744 fail_ioremap:
1745 pci_release_regions(instance->pdev);
1746
1747 return -EINVAL;
1748}
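The max-sectors computation takes the smaller of the two firmware-reported limits and otherwise falls back to an estimate based on the SG list size. A sketch of that selection logic, slightly generalized to treat a zero limit as unusable (the driver itself only falls back when the controller-info DCMD fails):

/* Pick the per-IO sector limit: the minimum of the two FW limits, or an
 * SG-list based fallback when no usable limit is reported. */
static unsigned int max_sectors(unsigned int stripe_limit,
				unsigned int request_limit,
				unsigned int max_sge, unsigned int page_size)
{
	if (stripe_limit && request_limit)
		return (stripe_limit < request_limit) ? stripe_limit
						      : request_limit;
	return max_sge * page_size / 512;
}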
1749
1750/**
1751 * megasas_release_mfi - Reverses the FW initialization
1752 * @instance: Adapter soft state
1753 */
1754static void megasas_release_mfi(struct megasas_instance *instance)
1755{
1756 u32 reply_q_sz = sizeof(u32) * (instance->max_fw_cmds + 1);
1757
1758 pci_free_consistent(instance->pdev, reply_q_sz,
1759 instance->reply_queue, instance->reply_queue_h);
1760
1761 megasas_free_cmds(instance);
1762
1763 iounmap(instance->reg_set);
1764
1765 pci_release_regions(instance->pdev);
1766}
1767
1768/**
1769 * megasas_get_seq_num - Gets latest event sequence numbers
1770 * @instance: Adapter soft state
1771 * @eli: FW event log sequence numbers information
1772 *
1773 * FW maintains a log of all events in a non-volatile area. Upper layers would
1774 * usually query the latest event sequence number, the sequence number at
1775 * boot time, etc. They would "read" all the events below the latest seq number
1776 * by issuing a direct fw cmd (DCMD). For the future events (beyond latest seq
1777 * number), they would subscribe to AEN (asynchronous event notification) and
1778 * wait for the events to happen.
1779 */
1780static int
1781megasas_get_seq_num(struct megasas_instance *instance,
1782 struct megasas_evt_log_info *eli)
1783{
1784 struct megasas_cmd *cmd;
1785 struct megasas_dcmd_frame *dcmd;
1786 struct megasas_evt_log_info *el_info;
1787 dma_addr_t el_info_h = 0;
1788
1789 cmd = megasas_get_cmd(instance);
1790
1791 if (!cmd) {
1792 return -ENOMEM;
1793 }
1794
1795 dcmd = &cmd->frame->dcmd;
1796 el_info = pci_alloc_consistent(instance->pdev,
1797 sizeof(struct megasas_evt_log_info),
1798 &el_info_h);
1799
1800 if (!el_info) {
1801 megasas_return_cmd(instance, cmd);
1802 return -ENOMEM;
1803 }
1804
1805 memset(el_info, 0, sizeof(*el_info));
1806 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
1807
1808 dcmd->cmd = MFI_CMD_DCMD;
1809 dcmd->cmd_status = 0x0;
1810 dcmd->sge_count = 1;
1811 dcmd->flags = MFI_FRAME_DIR_READ;
1812 dcmd->timeout = 0;
1813 dcmd->data_xfer_len = sizeof(struct megasas_evt_log_info);
1814 dcmd->opcode = MR_DCMD_CTRL_EVENT_GET_INFO;
1815 dcmd->sgl.sge32[0].phys_addr = el_info_h;
1816 dcmd->sgl.sge32[0].length = sizeof(struct megasas_evt_log_info);
1817
1818 megasas_issue_blocked_cmd(instance, cmd);
1819
1820 /*
1821 * Copy the data back into callers buffer
1822 */
1823 memcpy(eli, el_info, sizeof(struct megasas_evt_log_info));
1824
1825 pci_free_consistent(instance->pdev, sizeof(struct megasas_evt_log_info),
1826 el_info, el_info_h);
1827
1828 megasas_return_cmd(instance, cmd);
1829
1830 return 0;
1831}
1832
1833/**
1834 * megasas_register_aen - Registers for asynchronous event notification
1835 * @instance: Adapter soft state
1836 * @seq_num: The starting sequence number
1837 * @class_locale: Class of the event
1838 *
1839 * This function subscribes for AEN for events beyond the @seq_num. It requests
1840 * to be notified if and only if the event is of type @class_locale
1841 */
1842static int
1843megasas_register_aen(struct megasas_instance *instance, u32 seq_num,
1844 u32 class_locale_word)
1845{
1846 int ret_val;
1847 struct megasas_cmd *cmd;
1848 struct megasas_dcmd_frame *dcmd;
1849 union megasas_evt_class_locale curr_aen;
1850 union megasas_evt_class_locale prev_aen;
1851
1852 /*
1853 * If there an AEN pending already (aen_cmd), check if the
1854 * class_locale of that pending AEN is inclusive of the new
1855 * AEN request we currently have. If it is, then we don't have
1856 * to do anything. In other words, whichever events the current
1857 * AEN request is subscribing to, have already been subscribed
1858 * to.
1859 *
1860 * If the old_cmd is _not_ inclusive, then we have to abort
1861 * that command, form a class_locale that is superset of both
1862 * old and current and re-issue to the FW
1863 */
1864
1865 curr_aen.word = class_locale_word;
1866
1867 if (instance->aen_cmd) {
1868
1869 prev_aen.word = instance->aen_cmd->frame->dcmd.mbox.w[1];
1870
1871 /*
1872 * A class whose enum value is smaller is inclusive of all
1873 * higher values. If a PROGRESS (= -1) was previously
1874 * registered, then new registration requests for higher
1875 * classes need not be sent to FW. They are automatically
1876 * included.
1877 *
1878 * Locale numbers don't have such hierarchy. They are bitmap
1879 * values
1880 */
1881 if ((prev_aen.members.class <= curr_aen.members.class) &&
1882 !((prev_aen.members.locale & curr_aen.members.locale) ^
1883 curr_aen.members.locale)) {
1884 /*
1885 * Previously issued event registration includes
1886 * current request. Nothing to do.
1887 */
1888 return 0;
1889 } else {
1890 curr_aen.members.locale |= prev_aen.members.locale;
1891
1892 if (prev_aen.members.class < curr_aen.members.class)
1893 curr_aen.members.class = prev_aen.members.class;
1894
1895 instance->aen_cmd->abort_aen = 1;
1896 ret_val = megasas_issue_blocked_abort_cmd(instance,
1897 instance->
1898 aen_cmd);
1899
1900 if (ret_val) {
1901 printk(KERN_DEBUG "megasas: Failed to abort "
1902 "previous AEN command\n");
1903 return ret_val;
1904 }
1905 }
1906 }
1907
1908 cmd = megasas_get_cmd(instance);
1909
1910 if (!cmd)
1911 return -ENOMEM;
1912
1913 dcmd = &cmd->frame->dcmd;
1914
1915 memset(instance->evt_detail, 0, sizeof(struct megasas_evt_detail));
1916
1917 /*
1918 * Prepare DCMD for aen registration
1919 */
1920 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
1921
1922 dcmd->cmd = MFI_CMD_DCMD;
1923 dcmd->cmd_status = 0x0;
1924 dcmd->sge_count = 1;
1925 dcmd->flags = MFI_FRAME_DIR_READ;
1926 dcmd->timeout = 0;
1927 dcmd->data_xfer_len = sizeof(struct megasas_evt_detail);
1928 dcmd->opcode = MR_DCMD_CTRL_EVENT_WAIT;
1929 dcmd->mbox.w[0] = seq_num;
1930 dcmd->mbox.w[1] = curr_aen.word;
1931 dcmd->sgl.sge32[0].phys_addr = (u32) instance->evt_detail_h;
1932 dcmd->sgl.sge32[0].length = sizeof(struct megasas_evt_detail);
1933
1934 /*
1935 * Store reference to the cmd used to register for AEN. When an
1936 * application wants us to register for AEN, we have to abort this
1937 * cmd and re-register with a new EVENT LOCALE supplied by that app
1938 */
1939 instance->aen_cmd = cmd;
1940
1941 /*
1942 * Issue the aen registration frame
1943 */
1944 writel(cmd->frame_phys_addr >> 3,
1945 &instance->reg_set->inbound_queue_port);
1946
1947 return 0;
1948}
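The inclusion test for an already-pending AEN registration combines two rules spelled out in the comment above: classes are ordered (a lower enum value includes all higher ones) while locales are a bitmask. The check, written out on its own:

/* A pending registration (prev) already covers the new request (curr) when
 * its class is at least as inclusive and its locale mask is a superset of
 * the requested locales. */
static int aen_is_covered(int prev_class, unsigned int prev_locale,
			  int curr_class, unsigned int curr_locale)
{
	return prev_class <= curr_class &&
	       (curr_locale & ~prev_locale) == 0;
}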
1949
1950/**
1951 * megasas_start_aen - Subscribes to AEN during driver load time
1952 * @instance: Adapter soft state
1953 */
1954static int megasas_start_aen(struct megasas_instance *instance)
1955{
1956 struct megasas_evt_log_info eli;
1957 union megasas_evt_class_locale class_locale;
1958
1959 /*
1960 * Get the latest sequence number from FW
1961 */
1962 memset(&eli, 0, sizeof(eli));
1963
1964 if (megasas_get_seq_num(instance, &eli))
1965 return -1;
1966
1967 /*
1968 * Register AEN with FW for latest sequence number plus 1
1969 */
1970 class_locale.members.reserved = 0;
1971 class_locale.members.locale = MR_EVT_LOCALE_ALL;
1972 class_locale.members.class = MR_EVT_CLASS_DEBUG;
1973
1974 return megasas_register_aen(instance, eli.newest_seq_num + 1,
1975 class_locale.word);
1976}
1977
1978/**
1979 * megasas_io_attach - Attaches this driver to SCSI mid-layer
1980 * @instance: Adapter soft state
1981 */
1982static int megasas_io_attach(struct megasas_instance *instance)
1983{
1984 struct Scsi_Host *host = instance->host;
1985
1986 /*
1987 * Export parameters required by SCSI mid-layer
1988 */
1989 host->irq = instance->pdev->irq;
1990 host->unique_id = instance->unique_id;
1991 host->can_queue = instance->max_fw_cmds - MEGASAS_INT_CMDS;
1992 host->this_id = instance->init_id;
1993 host->sg_tablesize = instance->max_num_sge;
1994 host->max_sectors = instance->max_sectors_per_req;
1995 host->cmd_per_lun = 128;
1996 host->max_channel = MEGASAS_MAX_CHANNELS - 1;
1997 host->max_id = MEGASAS_MAX_DEV_PER_CHANNEL;
1998 host->max_lun = MEGASAS_MAX_LUN;
1999
2000 /*
2001 * Notify the mid-layer about the new controller
2002 */
2003 if (scsi_add_host(host, &instance->pdev->dev)) {
2004 printk(KERN_DEBUG "megasas: scsi_add_host failed\n");
2005 return -ENODEV;
2006 }
2007
2008 /*
2009 * Trigger SCSI to scan our drives
2010 */
2011 scsi_scan_host(host);
2012 return 0;
2013}
2014
2015/**
2016 * megasas_probe_one - PCI hotplug entry point
2017 * @pdev: PCI device structure
2018 * @id: PCI ids of supported hotplugged adapter
2019 */
2020static int __devinit
2021megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2022{
2023 int rval;
2024 struct Scsi_Host *host;
2025 struct megasas_instance *instance;
2026
2027 /*
2028 * Announce PCI information
2029 */
2030 printk(KERN_INFO "megasas: %#4.04x:%#4.04x:%#4.04x:%#4.04x: ",
2031 pdev->vendor, pdev->device, pdev->subsystem_vendor,
2032 pdev->subsystem_device);
2033
2034 printk("bus %d:slot %d:func %d\n",
2035 pdev->bus->number, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
2036
2037 /*
2038 * PCI prepping: enable device set bus mastering and dma mask
2039 */
2040 rval = pci_enable_device(pdev);
2041
2042 if (rval) {
2043 return rval;
2044 }
2045
2046 pci_set_master(pdev);
2047
2048 /*
2049 * All our controllers are capable of performing 64-bit DMA
2050 */
2051 if (IS_DMA64) {
2052 if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
2053
2054 if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0)
2055 goto fail_set_dma_mask;
2056 }
2057 } else {
2058 if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0)
2059 goto fail_set_dma_mask;
2060 }
2061
2062 host = scsi_host_alloc(&megasas_template,
2063 sizeof(struct megasas_instance));
2064
2065 if (!host) {
2066 printk(KERN_DEBUG "megasas: scsi_host_alloc failed\n");
2067 goto fail_alloc_instance;
2068 }
2069
2070 instance = (struct megasas_instance *)host->hostdata;
2071 memset(instance, 0, sizeof(*instance));
2072
2073 instance->producer = pci_alloc_consistent(pdev, sizeof(u32),
2074 &instance->producer_h);
2075 instance->consumer = pci_alloc_consistent(pdev, sizeof(u32),
2076 &instance->consumer_h);
2077
2078 if (!instance->producer || !instance->consumer) {
2079 printk(KERN_DEBUG "megasas: Failed to allocate memory for "
2080 "producer, consumer\n");
2081 goto fail_alloc_dma_buf;
2082 }
2083
2084 *instance->producer = 0;
2085 *instance->consumer = 0;
2086
2087 instance->evt_detail = pci_alloc_consistent(pdev,
2088 sizeof(struct
2089 megasas_evt_detail),
2090 &instance->evt_detail_h);
2091
2092 if (!instance->evt_detail) {
2093 printk(KERN_DEBUG "megasas: Failed to allocate memory for "
2094 "event detail structure\n");
2095 goto fail_alloc_dma_buf;
2096 }
2097
2098 /*
2099 * Initialize locks and queues
2100 */
2101 INIT_LIST_HEAD(&instance->cmd_pool);
2102
2103 init_waitqueue_head(&instance->int_cmd_wait_q);
2104 init_waitqueue_head(&instance->abort_cmd_wait_q);
2105
2106 spin_lock_init(&instance->cmd_pool_lock);
2107 spin_lock_init(&instance->instance_lock);
2108
2109 sema_init(&instance->aen_mutex, 1);
2110 sema_init(&instance->ioctl_sem, MEGASAS_INT_CMDS);
2111
2112 /*
2113 * Initialize PCI related and misc parameters
2114 */
2115 instance->pdev = pdev;
2116 instance->host = host;
2117 instance->unique_id = pdev->bus->number << 8 | pdev->devfn;
2118 instance->init_id = MEGASAS_DEFAULT_INIT_ID;
2119
2120 /*
2121 * Initialize MFI Firmware
2122 */
2123 if (megasas_init_mfi(instance))
2124 goto fail_init_mfi;
2125
2126 /*
2127 * Register IRQ
2128 */
2129 if (request_irq(pdev->irq, megasas_isr, SA_SHIRQ, "megasas", instance)) {
2130 printk(KERN_DEBUG "megasas: Failed to register IRQ\n");
2131 goto fail_irq;
2132 }
2133
2134 megasas_enable_intr(instance->reg_set);
2135
2136 /*
2137 * Store instance in PCI softstate
2138 */
2139 pci_set_drvdata(pdev, instance);
2140
2141 /*
2142 * Add this controller to megasas_mgmt_info structure so that it
2143 * can be exported to management applications
2144 */
2145 megasas_mgmt_info.count++;
2146 megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = instance;
2147 megasas_mgmt_info.max_index++;
2148
2149 /*
2150 * Initiate AEN (Asynchronous Event Notification)
2151 */
2152 if (megasas_start_aen(instance)) {
2153 printk(KERN_DEBUG "megasas: start aen failed\n");
2154 goto fail_start_aen;
2155 }
2156
2157 /*
2158 * Register with SCSI mid-layer
2159 */
2160 if (megasas_io_attach(instance))
2161 goto fail_io_attach;
2162
2163 return 0;
2164
2165 fail_start_aen:
2166 fail_io_attach:
2167 megasas_mgmt_info.count--;
2168 megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = NULL;
2169 megasas_mgmt_info.max_index--;
2170
2171 pci_set_drvdata(pdev, NULL);
2172 megasas_disable_intr(instance->reg_set);
2173 free_irq(instance->pdev->irq, instance);
2174
2175 megasas_release_mfi(instance);
2176
2177 fail_irq:
2178 fail_init_mfi:
2179 fail_alloc_dma_buf:
2180 if (instance->evt_detail)
2181 pci_free_consistent(pdev, sizeof(struct megasas_evt_detail),
2182 instance->evt_detail,
2183 instance->evt_detail_h);
2184
2185 if (instance->producer)
2186 pci_free_consistent(pdev, sizeof(u32), instance->producer,
2187 instance->producer_h);
2188 if (instance->consumer)
2189 pci_free_consistent(pdev, sizeof(u32), instance->consumer,
2190 instance->consumer_h);
2191 scsi_host_put(host);
2192
2193 fail_alloc_instance:
2194 fail_set_dma_mask:
2195 pci_disable_device(pdev);
2196
2197 return -ENODEV;
2198}
2199
2200/**
2201 * megasas_flush_cache - Requests FW to flush all its caches
2202 * @instance: Adapter soft state
2203 */
2204static void megasas_flush_cache(struct megasas_instance *instance)
2205{
2206 struct megasas_cmd *cmd;
2207 struct megasas_dcmd_frame *dcmd;
2208
2209 cmd = megasas_get_cmd(instance);
2210
2211 if (!cmd)
2212 return;
2213
2214 dcmd = &cmd->frame->dcmd;
2215
2216 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
2217
2218 dcmd->cmd = MFI_CMD_DCMD;
2219 dcmd->cmd_status = 0x0;
2220 dcmd->sge_count = 0;
2221 dcmd->flags = MFI_FRAME_DIR_NONE;
2222 dcmd->timeout = 0;
2223 dcmd->data_xfer_len = 0;
2224 dcmd->opcode = MR_DCMD_CTRL_CACHE_FLUSH;
2225 dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
2226
2227 megasas_issue_blocked_cmd(instance, cmd);
2228
2229 megasas_return_cmd(instance, cmd);
2230
2231 return;
2232}
2233
2234/**
2235 * megasas_shutdown_controller - Instructs FW to shutdown the controller
2236 * @instance: Adapter soft state
2237 */
2238static void megasas_shutdown_controller(struct megasas_instance *instance)
2239{
2240 struct megasas_cmd *cmd;
2241 struct megasas_dcmd_frame *dcmd;
2242
2243 cmd = megasas_get_cmd(instance);
2244
2245 if (!cmd)
2246 return;
2247
2248 if (instance->aen_cmd)
2249 megasas_issue_blocked_abort_cmd(instance, instance->aen_cmd);
2250
2251 dcmd = &cmd->frame->dcmd;
2252
2253 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
2254
2255 dcmd->cmd = MFI_CMD_DCMD;
2256 dcmd->cmd_status = 0x0;
2257 dcmd->sge_count = 0;
2258 dcmd->flags = MFI_FRAME_DIR_NONE;
2259 dcmd->timeout = 0;
2260 dcmd->data_xfer_len = 0;
2261 dcmd->opcode = MR_DCMD_CTRL_SHUTDOWN;
2262
2263 megasas_issue_blocked_cmd(instance, cmd);
2264
2265 megasas_return_cmd(instance, cmd);
2266
2267 return;
2268}
2269
2270/**
2271 * megasas_detach_one - PCI hot"un"plug entry point
2272 * @pdev: PCI device structure
2273 */
2274static void megasas_detach_one(struct pci_dev *pdev)
2275{
2276 int i;
2277 struct Scsi_Host *host;
2278 struct megasas_instance *instance;
2279
2280 instance = pci_get_drvdata(pdev);
2281 host = instance->host;
2282
2283 scsi_remove_host(instance->host);
2284 megasas_flush_cache(instance);
2285 megasas_shutdown_controller(instance);
2286
2287 /*
2288 * Take the instance off the instance array. Note that we will not
2289 * decrement the max_index. We let this array be a sparse array
2290 */
2291 for (i = 0; i < megasas_mgmt_info.max_index; i++) {
2292 if (megasas_mgmt_info.instance[i] == instance) {
2293 megasas_mgmt_info.count--;
2294 megasas_mgmt_info.instance[i] = NULL;
2295
2296 break;
2297 }
2298 }
2299
2300 pci_set_drvdata(instance->pdev, NULL);
2301
2302 megasas_disable_intr(instance->reg_set);
2303
2304 free_irq(instance->pdev->irq, instance);
2305
2306 megasas_release_mfi(instance);
2307
2308 pci_free_consistent(pdev, sizeof(struct megasas_evt_detail),
2309 instance->evt_detail, instance->evt_detail_h);
2310
2311 pci_free_consistent(pdev, sizeof(u32), instance->producer,
2312 instance->producer_h);
2313
2314 pci_free_consistent(pdev, sizeof(u32), instance->consumer,
2315 instance->consumer_h);
2316
2317 scsi_host_put(host);
2318
2319 pci_set_drvdata(pdev, NULL);
2320
2321 pci_disable_device(pdev);
2322
2323 return;
2324}
2325
2326/**
2327 * megasas_shutdown - Shutdown entry point
2328 * @pdev: PCI device structure
2329 */
2330static void megasas_shutdown(struct pci_dev *pdev)
2331{
2332 struct megasas_instance *instance = pci_get_drvdata(pdev);
2333 megasas_flush_cache(instance);
2334}
2335
2336/**
2337 * megasas_mgmt_open - char node "open" entry point
2338 */
2339static int megasas_mgmt_open(struct inode *inode, struct file *filep)
2340{
2341 /*
2342 * Allow only those users with admin rights
2343 */
2344 if (!capable(CAP_SYS_ADMIN))
2345 return -EACCES;
2346
2347 return 0;
2348}
2349
2350/**
2351 * megasas_mgmt_release - char node "release" entry point
2352 */
2353static int megasas_mgmt_release(struct inode *inode, struct file *filep)
2354{
2355 filep->private_data = NULL;
2356 fasync_helper(-1, filep, 0, &megasas_async_queue);
2357
2358 return 0;
2359}
2360
2361/**
2362 * megasas_mgmt_fasync - Async notifier registration from applications
2363 *
2364 * This function adds the calling process to a driver global queue. When an
2365 * event occurs, SIGIO will be sent to all processes in this queue.
2366 */
2367static int megasas_mgmt_fasync(int fd, struct file *filep, int mode)
2368{
2369 int rc;
2370
2371 down(&megasas_async_queue_mutex);
2372
2373 rc = fasync_helper(fd, filep, mode, &megasas_async_queue);
2374
2375 up(&megasas_async_queue_mutex);
2376
2377 if (rc >= 0) {
2378 /* For sanity check when we get ioctl */
2379 filep->private_data = filep;
2380 return 0;
2381 }
2382
2383 printk(KERN_DEBUG "megasas: fasync_helper failed [%d]\n", rc);
2384
2385 return rc;
2386}
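
From user space, joining this queue is the standard fasync sequence on the management node; a minimal sketch (the device path below is an assumption, since the character major is allocated dynamically at module load):

        /* Sketch only: ask the kernel to deliver SIGIO when an AEN arrives. */
        int fd = open("/dev/megaraid_sas_ioctl_node", O_RDWR);  /* assumed path */

        signal(SIGIO, aen_handler);                     /* application-defined handler */
        fcntl(fd, F_SETOWN, getpid());
        fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | FASYNC);

Setting FASYNC on the open file is what triggers the driver's .fasync entry point above.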
2387
2388/**
2389 * megasas_mgmt_fw_ioctl - Issues management ioctls to FW
2390 * @instance: Adapter soft state
2391 * @argp: User's ioctl packet
2392 */
2393static int
2394megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
2395 struct megasas_iocpacket __user * user_ioc,
2396 struct megasas_iocpacket *ioc)
2397{
2398 struct megasas_sge32 *kern_sge32;
2399 struct megasas_cmd *cmd;
2400 void *kbuff_arr[MAX_IOCTL_SGE];
2401 dma_addr_t buf_handle = 0;
2402 int error = 0, i;
2403 void *sense = NULL;
2404 dma_addr_t sense_handle;
2405 u32 *sense_ptr;
2406
2407 memset(kbuff_arr, 0, sizeof(kbuff_arr));
2408
2409 if (ioc->sge_count > MAX_IOCTL_SGE) {
2410 printk(KERN_DEBUG "megasas: SGE count [%d] > max limit [%d]\n",
2411 ioc->sge_count, MAX_IOCTL_SGE);
2412 return -EINVAL;
2413 }
2414
2415 cmd = megasas_get_cmd(instance);
2416 if (!cmd) {
2417 printk(KERN_DEBUG "megasas: Failed to get a cmd packet\n");
2418 return -ENOMEM;
2419 }
2420
2421 /*
2422 * User's IOCTL packet has 2 frames (maximum). Copy those two
2423 * frames into our cmd's frames. cmd->frame's context will get
2424 * overwritten when we copy from user's frames. So set that value
2425 * alone separately
2426 */
2427 memcpy(cmd->frame, ioc->frame.raw, 2 * MEGAMFI_FRAME_SIZE);
2428 cmd->frame->hdr.context = cmd->index;
2429
2430 /*
2431 * The management interface between applications and the fw uses
2432 * MFI frames. E.g., RAID configuration changes, LD property changes,
2433 * etc. are accomplished through different kinds of MFI frames. The
2434 * driver needs to care only about substituting user buffers with
2435 * kernel buffers in SGLs. The location of SGL is embedded in the
2436 * struct iocpacket itself.
2437 */
2438 kern_sge32 = (struct megasas_sge32 *)
2439 ((unsigned long)cmd->frame + ioc->sgl_off);
2440
2441 /*
2442 * For each user buffer, create a mirror buffer and copy in
2443 */
2444 for (i = 0; i < ioc->sge_count; i++) {
2445 kbuff_arr[i] = pci_alloc_consistent(instance->pdev,
2446 ioc->sgl[i].iov_len,
2447 &buf_handle);
2448 if (!kbuff_arr[i]) {
2449 printk(KERN_DEBUG "megasas: Failed to alloc "
2450 "kernel SGL buffer for IOCTL \n");
2451 error = -ENOMEM;
2452 goto out;
2453 }
2454
2455 /*
2456 * We don't change the dma_coherent_mask, so
2457 * pci_alloc_consistent only returns 32bit addresses
2458 */
2459 kern_sge32[i].phys_addr = (u32) buf_handle;
2460 kern_sge32[i].length = ioc->sgl[i].iov_len;
2461
2462 /*
2463 * We created a kernel buffer corresponding to the
2464 * user buffer. Now copy in from the user buffer
2465 */
2466 if (copy_from_user(kbuff_arr[i], ioc->sgl[i].iov_base,
2467 (u32) (ioc->sgl[i].iov_len))) {
2468 error = -EFAULT;
2469 goto out;
2470 }
2471 }
2472
2473 if (ioc->sense_len) {
2474 sense = pci_alloc_consistent(instance->pdev, ioc->sense_len,
2475 &sense_handle);
2476 if (!sense) {
2477 error = -ENOMEM;
2478 goto out;
2479 }
2480
2481 sense_ptr =
2482 (u32 *) ((unsigned long)cmd->frame + ioc->sense_off);
2483 *sense_ptr = sense_handle;
2484 }
2485
2486 /*
2487 * Set the sync_cmd flag so that the ISR knows not to complete this
2488 * cmd to the SCSI mid-layer
2489 */
2490 cmd->sync_cmd = 1;
2491 megasas_issue_blocked_cmd(instance, cmd);
2492 cmd->sync_cmd = 0;
2493
2494 /*
2495 * copy out the kernel buffers to user buffers
2496 */
2497 for (i = 0; i < ioc->sge_count; i++) {
2498 if (copy_to_user(ioc->sgl[i].iov_base, kbuff_arr[i],
2499 ioc->sgl[i].iov_len)) {
2500 error = -EFAULT;
2501 goto out;
2502 }
2503 }
2504
2505 /*
2506 * copy out the sense
2507 */
2508 if (ioc->sense_len) {
2509 /*
2510 * sense_ptr points to the location that has the user
2511 * sense buffer address
2512 */
2513 sense_ptr = (u32 *) ((unsigned long)ioc->frame.raw +
2514 ioc->sense_off);
2515
2516 if (copy_to_user((void __user *)((unsigned long)(*sense_ptr)),
2517 sense, ioc->sense_len)) {
2518 error = -EFAULT;
2519 goto out;
2520 }
2521 }
2522
2523 /*
2524 * copy the status codes returned by the fw
2525 */
2526 if (copy_to_user(&user_ioc->frame.hdr.cmd_status,
2527 &cmd->frame->hdr.cmd_status, sizeof(u8))) {
2528 printk(KERN_DEBUG "megasas: Error copying out cmd_status\n");
2529 error = -EFAULT;
2530 }
2531
2532 out:
2533 if (sense) {
2534 pci_free_consistent(instance->pdev, ioc->sense_len,
2535 sense, sense_handle);
2536 }
2537
2538 for (i = 0; i < ioc->sge_count && kbuff_arr[i]; i++) {
2539 pci_free_consistent(instance->pdev,
2540 kern_sge32[i].length,
2541 kbuff_arr[i], kern_sge32[i].phys_addr);
2542 }
2543
2544 megasas_return_cmd(instance, cmd);
2545 return error;
2546}
2547
2548static struct megasas_instance *megasas_lookup_instance(u16 host_no)
2549{
2550 int i;
2551
2552 for (i = 0; i < megasas_mgmt_info.max_index; i++) {
2553
2554 if ((megasas_mgmt_info.instance[i]) &&
2555 (megasas_mgmt_info.instance[i]->host->host_no == host_no))
2556 return megasas_mgmt_info.instance[i];
2557 }
2558
2559 return NULL;
2560}
2561
2562static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg)
2563{
2564 struct megasas_iocpacket __user *user_ioc =
2565 (struct megasas_iocpacket __user *)arg;
2566 struct megasas_iocpacket *ioc;
2567 struct megasas_instance *instance;
2568 int error;
2569
2570 ioc = kmalloc(sizeof(*ioc), GFP_KERNEL);
2571 if (!ioc)
2572 return -ENOMEM;
2573
2574 if (copy_from_user(ioc, user_ioc, sizeof(*ioc))) {
2575 error = -EFAULT;
2576 goto out_kfree_ioc;
2577 }
2578
2579 instance = megasas_lookup_instance(ioc->host_no);
2580 if (!instance) {
2581 error = -ENODEV;
2582 goto out_kfree_ioc;
2583 }
2584
2585 /*
2586 * We will allow only MEGASAS_INT_CMDS number of parallel ioctl cmds
2587 */
2588 if (down_interruptible(&instance->ioctl_sem)) {
2589 error = -ERESTARTSYS;
2590 goto out_kfree_ioc;
2591 }
2592 error = megasas_mgmt_fw_ioctl(instance, user_ioc, ioc);
2593 up(&instance->ioctl_sem);
2594
2595 out_kfree_ioc:
2596 kfree(ioc);
2597 return error;
2598}
2599
2600static int megasas_mgmt_ioctl_aen(struct file *file, unsigned long arg)
2601{
2602 struct megasas_instance *instance;
2603 struct megasas_aen aen;
2604 int error;
2605
2606 if (file->private_data != file) {
2607 printk(KERN_DEBUG "megasas: fasync_helper was not "
2608 "called first\n");
2609 return -EINVAL;
2610 }
2611
2612 if (copy_from_user(&aen, (void __user *)arg, sizeof(aen)))
2613 return -EFAULT;
2614
2615 instance = megasas_lookup_instance(aen.host_no);
2616
2617 if (!instance)
2618 return -ENODEV;
2619
2620 down(&instance->aen_mutex);
2621 error = megasas_register_aen(instance, aen.seq_num,
2622 aen.class_locale_word);
2623 up(&instance->aen_mutex);
2624 return error;
2625}
2626
2627/**
2628 * megasas_mgmt_ioctl - char node ioctl entry point
2629 */
2630static long
2631megasas_mgmt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
2632{
2633 switch (cmd) {
2634 case MEGASAS_IOC_FIRMWARE:
2635 return megasas_mgmt_ioctl_fw(file, arg);
2636
2637 case MEGASAS_IOC_GET_AEN:
2638 return megasas_mgmt_ioctl_aen(file, arg);
2639 }
2640
2641 return -ENOTTY;
2642}
2643
2644#ifdef CONFIG_COMPAT
2645static int megasas_mgmt_compat_ioctl_fw(struct file *file, unsigned long arg)
2646{
2647 struct compat_megasas_iocpacket __user *cioc =
2648 (struct compat_megasas_iocpacket __user *)arg;
2649 struct megasas_iocpacket __user *ioc =
2650 compat_alloc_user_space(sizeof(struct megasas_iocpacket));
2651 int i;
2652 int error = 0;
2653
2654 clear_user(ioc, sizeof(*ioc));
2655
2656 if (copy_in_user(&ioc->host_no, &cioc->host_no, sizeof(u16)) ||
2657 copy_in_user(&ioc->sgl_off, &cioc->sgl_off, sizeof(u32)) ||
2658 copy_in_user(&ioc->sense_off, &cioc->sense_off, sizeof(u32)) ||
2659 copy_in_user(&ioc->sense_len, &cioc->sense_len, sizeof(u32)) ||
2660 copy_in_user(ioc->frame.raw, cioc->frame.raw, 128) ||
2661 copy_in_user(&ioc->sge_count, &cioc->sge_count, sizeof(u32)))
2662 return -EFAULT;
2663
2664 for (i = 0; i < MAX_IOCTL_SGE; i++) {
2665 compat_uptr_t ptr;
2666
2667 if (get_user(ptr, &cioc->sgl[i].iov_base) ||
2668 put_user(compat_ptr(ptr), &ioc->sgl[i].iov_base) ||
2669 copy_in_user(&ioc->sgl[i].iov_len,
2670 &cioc->sgl[i].iov_len, sizeof(compat_size_t)))
2671 return -EFAULT;
2672 }
2673
2674 error = megasas_mgmt_ioctl_fw(file, (unsigned long)ioc);
2675
2676 if (copy_in_user(&cioc->frame.hdr.cmd_status,
2677 &ioc->frame.hdr.cmd_status, sizeof(u8))) {
2678 printk(KERN_DEBUG "megasas: error copy_in_user cmd_status\n");
2679 return -EFAULT;
2680 }
2681 return error;
2682}
2683
2684static long
2685megasas_mgmt_compat_ioctl(struct file *file, unsigned int cmd,
2686 unsigned long arg)
2687{
2688 switch (cmd) {
2689 case MEGASAS_IOC_FIRMWARE:{
2690 return megasas_mgmt_compat_ioctl_fw(file, arg);
2691 }
2692 case MEGASAS_IOC_GET_AEN:
2693 return megasas_mgmt_ioctl_aen(file, arg);
2694 }
2695
2696 return -ENOTTY;
2697}
2698#endif
2699
2700/*
2701 * File operations structure for management interface
2702 */
2703static struct file_operations megasas_mgmt_fops = {
2704 .owner = THIS_MODULE,
2705 .open = megasas_mgmt_open,
2706 .release = megasas_mgmt_release,
2707 .fasync = megasas_mgmt_fasync,
2708 .unlocked_ioctl = megasas_mgmt_ioctl,
2709#ifdef CONFIG_COMPAT
2710 .compat_ioctl = megasas_mgmt_compat_ioctl,
2711#endif
2712};
2713
2714/*
2715 * PCI hotplug support registration structure
2716 */
2717static struct pci_driver megasas_pci_driver = {
2718
2719 .name = "megaraid_sas",
2720 .id_table = megasas_pci_table,
2721 .probe = megasas_probe_one,
2722 .remove = __devexit_p(megasas_detach_one),
2723 .shutdown = megasas_shutdown,
2724};
2725
2726/*
2727 * Sysfs driver attributes
2728 */
2729static ssize_t megasas_sysfs_show_version(struct device_driver *dd, char *buf)
2730{
2731 return snprintf(buf, strlen(MEGASAS_VERSION) + 2, "%s\n",
2732 MEGASAS_VERSION);
2733}
2734
2735static DRIVER_ATTR(version, S_IRUGO, megasas_sysfs_show_version, NULL);
2736
2737static ssize_t
2738megasas_sysfs_show_release_date(struct device_driver *dd, char *buf)
2739{
2740 return snprintf(buf, strlen(MEGASAS_RELDATE) + 2, "%s\n",
2741 MEGASAS_RELDATE);
2742}
2743
2744static DRIVER_ATTR(release_date, S_IRUGO, megasas_sysfs_show_release_date,
2745 NULL);
2746
2747/**
2748 * megasas_init - Driver load entry point
2749 */
2750static int __init megasas_init(void)
2751{
2752 int rval;
2753
2754 /*
2755 * Announce driver version and other information
2756 */
2757 printk(KERN_INFO "megasas: %s %s\n", MEGASAS_VERSION,
2758 MEGASAS_EXT_VERSION);
2759
2760 memset(&megasas_mgmt_info, 0, sizeof(megasas_mgmt_info));
2761
2762 /*
2763 * Register character device node
2764 */
2765 rval = register_chrdev(0, "megaraid_sas_ioctl", &megasas_mgmt_fops);
2766
2767 if (rval < 0) {
2768 printk(KERN_DEBUG "megasas: failed to open device node\n");
2769 return rval;
2770 }
2771
2772 megasas_mgmt_majorno = rval;
2773
2774 /*
2775 * Register ourselves as PCI hotplug module
2776 */
2777 rval = pci_module_init(&megasas_pci_driver);
2778
2779 if (rval) {
2780 printk(KERN_DEBUG "megasas: PCI hotplug registration failed\n");
2781 unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl");
2782 }
2783
2784 driver_create_file(&megasas_pci_driver.driver, &driver_attr_version);
2785 driver_create_file(&megasas_pci_driver.driver,
2786 &driver_attr_release_date);
2787
2788 return rval;
2789}
2790
2791/**
2792 * megasas_exit - Driver unload entry point
2793 */
2794static void __exit megasas_exit(void)
2795{
2796 driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version);
2797 driver_remove_file(&megasas_pci_driver.driver,
2798 &driver_attr_release_date);
2799
2800 pci_unregister_driver(&megasas_pci_driver);
2801 unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl");
2802}
2803
2804module_init(megasas_init);
2805module_exit(megasas_exit);
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
new file mode 100644
index 000000000000..eaec9d531424
--- /dev/null
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -0,0 +1,1142 @@
1/*
2 *
3 * Linux MegaRAID driver for SAS based RAID controllers
4 *
5 * Copyright (c) 2003-2005 LSI Logic Corporation.
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 *
12 * FILE : megaraid_sas.h
13 */
14
15#ifndef LSI_MEGARAID_SAS_H
16#define LSI_MEGARAID_SAS_H
17
18/**
19 * MegaRAID SAS Driver meta data
20 */
21#define MEGASAS_VERSION "00.00.02.00-rc4"
22#define MEGASAS_RELDATE "Sep 16, 2005"
23#define MEGASAS_EXT_VERSION "Fri Sep 16 12:37:08 EDT 2005"
24
25/*
26 * =====================================
27 * MegaRAID SAS MFI firmware definitions
28 * =====================================
29 */
30
31/*
32 * MFI stands for MegaRAID SAS FW Interface. This is just a moniker for the
33 * protocol between the software and firmware. Commands are issued using
34 * "message frames"
35 */
36
37/**
38 * FW posts its state in upper 4 bits of outbound_msg_0 register
39 */
40#define MFI_STATE_MASK 0xF0000000
41#define MFI_STATE_UNDEFINED 0x00000000
42#define MFI_STATE_BB_INIT 0x10000000
43#define MFI_STATE_FW_INIT 0x40000000
44#define MFI_STATE_WAIT_HANDSHAKE 0x60000000
45#define MFI_STATE_FW_INIT_2 0x70000000
46#define MFI_STATE_DEVICE_SCAN 0x80000000
47#define MFI_STATE_FLUSH_CACHE 0xA0000000
48#define MFI_STATE_READY 0xB0000000
49#define MFI_STATE_OPERATIONAL 0xC0000000
50#define MFI_STATE_FAULT 0xF0000000
51
52#define MEGAMFI_FRAME_SIZE 64
53
54/**
55 * During FW init, clear pending cmds & reset state using inbound_msg_0
56 *
57 * ABORT : Abort all pending cmds
58 * READY : Move from OPERATIONAL to READY state; discard queue info
59 * MFIMODE : Discard (possible) low MFA posted in 64-bit mode (??)
60 * CLR_HANDSHAKE: FW is waiting for HANDSHAKE from BIOS or Driver
61 */
62#define MFI_INIT_ABORT 0x00000000
63#define MFI_INIT_READY 0x00000002
64#define MFI_INIT_MFIMODE 0x00000004
65#define MFI_INIT_CLEAR_HANDSHAKE 0x00000008
66#define MFI_RESET_FLAGS (MFI_INIT_READY|MFI_INIT_MFIMODE)
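
A rough illustration of how these flag bits compose when handed to the firmware; the exact register, value and ordering used during the init handshake depend on the FW state and are assumptions here:

        /* Sketch only: ask FW to drop stale queue info and move toward READY. */
        writel(MFI_RESET_FLAGS, &instance->reg_set->inbound_msg_0);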
67
68/**
69 * MFI frame flags
70 */
71#define MFI_FRAME_POST_IN_REPLY_QUEUE 0x0000
72#define MFI_FRAME_DONT_POST_IN_REPLY_QUEUE 0x0001
73#define MFI_FRAME_SGL32 0x0000
74#define MFI_FRAME_SGL64 0x0002
75#define MFI_FRAME_SENSE32 0x0000
76#define MFI_FRAME_SENSE64 0x0004
77#define MFI_FRAME_DIR_NONE 0x0000
78#define MFI_FRAME_DIR_WRITE 0x0008
79#define MFI_FRAME_DIR_READ 0x0010
80#define MFI_FRAME_DIR_BOTH 0x0018
81
82/**
83 * Definition for cmd_status
84 */
85#define MFI_CMD_STATUS_POLL_MODE 0xFF
86
87/**
88 * MFI command opcodes
89 */
90#define MFI_CMD_INIT 0x00
91#define MFI_CMD_LD_READ 0x01
92#define MFI_CMD_LD_WRITE 0x02
93#define MFI_CMD_LD_SCSI_IO 0x03
94#define MFI_CMD_PD_SCSI_IO 0x04
95#define MFI_CMD_DCMD 0x05
96#define MFI_CMD_ABORT 0x06
97#define MFI_CMD_SMP 0x07
98#define MFI_CMD_STP 0x08
99
100#define MR_DCMD_CTRL_GET_INFO 0x01010000
101
102#define MR_DCMD_CTRL_CACHE_FLUSH 0x01101000
103#define MR_FLUSH_CTRL_CACHE 0x01
104#define MR_FLUSH_DISK_CACHE 0x02
105
106#define MR_DCMD_CTRL_SHUTDOWN 0x01050000
107#define MR_ENABLE_DRIVE_SPINDOWN 0x01
108
109#define MR_DCMD_CTRL_EVENT_GET_INFO 0x01040100
110#define MR_DCMD_CTRL_EVENT_GET 0x01040300
111#define MR_DCMD_CTRL_EVENT_WAIT 0x01040500
112#define MR_DCMD_LD_GET_PROPERTIES 0x03030000
113
114#define MR_DCMD_CLUSTER 0x08000000
115#define MR_DCMD_CLUSTER_RESET_ALL 0x08010100
116#define MR_DCMD_CLUSTER_RESET_LD 0x08010200
117
118/**
119 * MFI command completion codes
120 */
121enum MFI_STAT {
122 MFI_STAT_OK = 0x00,
123 MFI_STAT_INVALID_CMD = 0x01,
124 MFI_STAT_INVALID_DCMD = 0x02,
125 MFI_STAT_INVALID_PARAMETER = 0x03,
126 MFI_STAT_INVALID_SEQUENCE_NUMBER = 0x04,
127 MFI_STAT_ABORT_NOT_POSSIBLE = 0x05,
128 MFI_STAT_APP_HOST_CODE_NOT_FOUND = 0x06,
129 MFI_STAT_APP_IN_USE = 0x07,
130 MFI_STAT_APP_NOT_INITIALIZED = 0x08,
131 MFI_STAT_ARRAY_INDEX_INVALID = 0x09,
132 MFI_STAT_ARRAY_ROW_NOT_EMPTY = 0x0a,
133 MFI_STAT_CONFIG_RESOURCE_CONFLICT = 0x0b,
134 MFI_STAT_DEVICE_NOT_FOUND = 0x0c,
135 MFI_STAT_DRIVE_TOO_SMALL = 0x0d,
136 MFI_STAT_FLASH_ALLOC_FAIL = 0x0e,
137 MFI_STAT_FLASH_BUSY = 0x0f,
138 MFI_STAT_FLASH_ERROR = 0x10,
139 MFI_STAT_FLASH_IMAGE_BAD = 0x11,
140 MFI_STAT_FLASH_IMAGE_INCOMPLETE = 0x12,
141 MFI_STAT_FLASH_NOT_OPEN = 0x13,
142 MFI_STAT_FLASH_NOT_STARTED = 0x14,
143 MFI_STAT_FLUSH_FAILED = 0x15,
144 MFI_STAT_HOST_CODE_NOT_FOUNT = 0x16,
145 MFI_STAT_LD_CC_IN_PROGRESS = 0x17,
146 MFI_STAT_LD_INIT_IN_PROGRESS = 0x18,
147 MFI_STAT_LD_LBA_OUT_OF_RANGE = 0x19,
148 MFI_STAT_LD_MAX_CONFIGURED = 0x1a,
149 MFI_STAT_LD_NOT_OPTIMAL = 0x1b,
150 MFI_STAT_LD_RBLD_IN_PROGRESS = 0x1c,
151 MFI_STAT_LD_RECON_IN_PROGRESS = 0x1d,
152 MFI_STAT_LD_WRONG_RAID_LEVEL = 0x1e,
153 MFI_STAT_MAX_SPARES_EXCEEDED = 0x1f,
154 MFI_STAT_MEMORY_NOT_AVAILABLE = 0x20,
155 MFI_STAT_MFC_HW_ERROR = 0x21,
156 MFI_STAT_NO_HW_PRESENT = 0x22,
157 MFI_STAT_NOT_FOUND = 0x23,
158 MFI_STAT_NOT_IN_ENCL = 0x24,
159 MFI_STAT_PD_CLEAR_IN_PROGRESS = 0x25,
160 MFI_STAT_PD_TYPE_WRONG = 0x26,
161 MFI_STAT_PR_DISABLED = 0x27,
162 MFI_STAT_ROW_INDEX_INVALID = 0x28,
163 MFI_STAT_SAS_CONFIG_INVALID_ACTION = 0x29,
164 MFI_STAT_SAS_CONFIG_INVALID_DATA = 0x2a,
165 MFI_STAT_SAS_CONFIG_INVALID_PAGE = 0x2b,
166 MFI_STAT_SAS_CONFIG_INVALID_TYPE = 0x2c,
167 MFI_STAT_SCSI_DONE_WITH_ERROR = 0x2d,
168 MFI_STAT_SCSI_IO_FAILED = 0x2e,
169 MFI_STAT_SCSI_RESERVATION_CONFLICT = 0x2f,
170 MFI_STAT_SHUTDOWN_FAILED = 0x30,
171 MFI_STAT_TIME_NOT_SET = 0x31,
172 MFI_STAT_WRONG_STATE = 0x32,
173 MFI_STAT_LD_OFFLINE = 0x33,
174 MFI_STAT_PEER_NOTIFICATION_REJECTED = 0x34,
175 MFI_STAT_PEER_NOTIFICATION_FAILED = 0x35,
176 MFI_STAT_RESERVATION_IN_PROGRESS = 0x36,
177 MFI_STAT_I2C_ERRORS_DETECTED = 0x37,
178 MFI_STAT_PCI_ERRORS_DETECTED = 0x38,
179
180 MFI_STAT_INVALID_STATUS = 0xFF
181};
182
183/*
184 * Number of mailbox bytes in DCMD message frame
185 */
186#define MFI_MBOX_SIZE 12
187
188enum MR_EVT_CLASS {
189
190 MR_EVT_CLASS_DEBUG = -2,
191 MR_EVT_CLASS_PROGRESS = -1,
192 MR_EVT_CLASS_INFO = 0,
193 MR_EVT_CLASS_WARNING = 1,
194 MR_EVT_CLASS_CRITICAL = 2,
195 MR_EVT_CLASS_FATAL = 3,
196 MR_EVT_CLASS_DEAD = 4,
197
198};
199
200enum MR_EVT_LOCALE {
201
202 MR_EVT_LOCALE_LD = 0x0001,
203 MR_EVT_LOCALE_PD = 0x0002,
204 MR_EVT_LOCALE_ENCL = 0x0004,
205 MR_EVT_LOCALE_BBU = 0x0008,
206 MR_EVT_LOCALE_SAS = 0x0010,
207 MR_EVT_LOCALE_CTRL = 0x0020,
208 MR_EVT_LOCALE_CONFIG = 0x0040,
209 MR_EVT_LOCALE_CLUSTER = 0x0080,
210 MR_EVT_LOCALE_ALL = 0xffff,
211
212};
213
214enum MR_EVT_ARGS {
215
216 MR_EVT_ARGS_NONE,
217 MR_EVT_ARGS_CDB_SENSE,
218 MR_EVT_ARGS_LD,
219 MR_EVT_ARGS_LD_COUNT,
220 MR_EVT_ARGS_LD_LBA,
221 MR_EVT_ARGS_LD_OWNER,
222 MR_EVT_ARGS_LD_LBA_PD_LBA,
223 MR_EVT_ARGS_LD_PROG,
224 MR_EVT_ARGS_LD_STATE,
225 MR_EVT_ARGS_LD_STRIP,
226 MR_EVT_ARGS_PD,
227 MR_EVT_ARGS_PD_ERR,
228 MR_EVT_ARGS_PD_LBA,
229 MR_EVT_ARGS_PD_LBA_LD,
230 MR_EVT_ARGS_PD_PROG,
231 MR_EVT_ARGS_PD_STATE,
232 MR_EVT_ARGS_PCI,
233 MR_EVT_ARGS_RATE,
234 MR_EVT_ARGS_STR,
235 MR_EVT_ARGS_TIME,
236 MR_EVT_ARGS_ECC,
237
238};
239
240/*
241 * SAS controller properties
242 */
243struct megasas_ctrl_prop {
244
245 u16 seq_num;
246 u16 pred_fail_poll_interval;
247 u16 intr_throttle_count;
248 u16 intr_throttle_timeouts;
249 u8 rebuild_rate;
250 u8 patrol_read_rate;
251 u8 bgi_rate;
252 u8 cc_rate;
253 u8 recon_rate;
254 u8 cache_flush_interval;
255 u8 spinup_drv_count;
256 u8 spinup_delay;
257 u8 cluster_enable;
258 u8 coercion_mode;
259 u8 alarm_enable;
260 u8 disable_auto_rebuild;
261 u8 disable_battery_warn;
262 u8 ecc_bucket_size;
263 u16 ecc_bucket_leak_rate;
264 u8 restore_hotspare_on_insertion;
265 u8 expose_encl_devices;
266 u8 reserved[38];
267
268} __attribute__ ((packed));
269
270/*
271 * SAS controller information
272 */
273struct megasas_ctrl_info {
274
275 /*
276 * PCI device information
277 */
278 struct {
279
280 u16 vendor_id;
281 u16 device_id;
282 u16 sub_vendor_id;
283 u16 sub_device_id;
284 u8 reserved[24];
285
286 } __attribute__ ((packed)) pci;
287
288 /*
289 * Host interface information
290 */
291 struct {
292
293 u8 PCIX:1;
294 u8 PCIE:1;
295 u8 iSCSI:1;
296 u8 SAS_3G:1;
297 u8 reserved_0:4;
298 u8 reserved_1[6];
299 u8 port_count;
300 u64 port_addr[8];
301
302 } __attribute__ ((packed)) host_interface;
303
304 /*
305 * Device (backend) interface information
306 */
307 struct {
308
309 u8 SPI:1;
310 u8 SAS_3G:1;
311 u8 SATA_1_5G:1;
312 u8 SATA_3G:1;
313 u8 reserved_0:4;
314 u8 reserved_1[6];
315 u8 port_count;
316 u64 port_addr[8];
317
318 } __attribute__ ((packed)) device_interface;
319
320 /*
321 * List of components residing in flash. All strings are null terminated
322 */
323 u32 image_check_word;
324 u32 image_component_count;
325
326 struct {
327
328 char name[8];
329 char version[32];
330 char build_date[16];
331 char built_time[16];
332
333 } __attribute__ ((packed)) image_component[8];
334
335 /*
336 * List of flash components that have been flashed on the card, but
337 * are not in use, pending reset of the adapter. This list will be
338 * empty if a flash operation has not occurred. All strings are null
339 * terminated
340 */
341 u32 pending_image_component_count;
342
343 struct {
344
345 char name[8];
346 char version[32];
347 char build_date[16];
348 char build_time[16];
349
350 } __attribute__ ((packed)) pending_image_component[8];
351
352 u8 max_arms;
353 u8 max_spans;
354 u8 max_arrays;
355 u8 max_lds;
356
357 char product_name[80];
358 char serial_no[32];
359
360 /*
361 * Other physical/controller/operation information. Indicates the
362 * presence of the hardware
363 */
364 struct {
365
366 u32 bbu:1;
367 u32 alarm:1;
368 u32 nvram:1;
369 u32 uart:1;
370 u32 reserved:28;
371
372 } __attribute__ ((packed)) hw_present;
373
374 u32 current_fw_time;
375
376 /*
377 * Maximum data transfer sizes
378 */
379 u16 max_concurrent_cmds;
380 u16 max_sge_count;
381 u32 max_request_size;
382
383 /*
384 * Logical and physical device counts
385 */
386 u16 ld_present_count;
387 u16 ld_degraded_count;
388 u16 ld_offline_count;
389
390 u16 pd_present_count;
391 u16 pd_disk_present_count;
392 u16 pd_disk_pred_failure_count;
393 u16 pd_disk_failed_count;
394
395 /*
396 * Memory size information
397 */
398 u16 nvram_size;
399 u16 memory_size;
400 u16 flash_size;
401
402 /*
403 * Error counters
404 */
405 u16 mem_correctable_error_count;
406 u16 mem_uncorrectable_error_count;
407
408 /*
409 * Cluster information
410 */
411 u8 cluster_permitted;
412 u8 cluster_active;
413
414 /*
415 * Additional max data transfer sizes
416 */
417 u16 max_strips_per_io;
418
419 /*
420 * Controller capabilities structures
421 */
422 struct {
423
424 u32 raid_level_0:1;
425 u32 raid_level_1:1;
426 u32 raid_level_5:1;
427 u32 raid_level_1E:1;
428 u32 raid_level_6:1;
429 u32 reserved:27;
430
431 } __attribute__ ((packed)) raid_levels;
432
433 struct {
434
435 u32 rbld_rate:1;
436 u32 cc_rate:1;
437 u32 bgi_rate:1;
438 u32 recon_rate:1;
439 u32 patrol_rate:1;
440 u32 alarm_control:1;
441 u32 cluster_supported:1;
442 u32 bbu:1;
443 u32 spanning_allowed:1;
444 u32 dedicated_hotspares:1;
445 u32 revertible_hotspares:1;
446 u32 foreign_config_import:1;
447 u32 self_diagnostic:1;
448 u32 mixed_redundancy_arr:1;
449 u32 global_hot_spares:1;
450 u32 reserved:17;
451
452 } __attribute__ ((packed)) adapter_operations;
453
454 struct {
455
456 u32 read_policy:1;
457 u32 write_policy:1;
458 u32 io_policy:1;
459 u32 access_policy:1;
460 u32 disk_cache_policy:1;
461 u32 reserved:27;
462
463 } __attribute__ ((packed)) ld_operations;
464
465 struct {
466
467 u8 min;
468 u8 max;
469 u8 reserved[2];
470
471 } __attribute__ ((packed)) stripe_sz_ops;
472
473 struct {
474
475 u32 force_online:1;
476 u32 force_offline:1;
477 u32 force_rebuild:1;
478 u32 reserved:29;
479
480 } __attribute__ ((packed)) pd_operations;
481
482 struct {
483
484 u32 ctrl_supports_sas:1;
485 u32 ctrl_supports_sata:1;
486 u32 allow_mix_in_encl:1;
487 u32 allow_mix_in_ld:1;
488 u32 allow_sata_in_cluster:1;
489 u32 reserved:27;
490
491 } __attribute__ ((packed)) pd_mix_support;
492
493 /*
494 * Define ECC single-bit-error bucket information
495 */
496 u8 ecc_bucket_count;
497 u8 reserved_2[11];
498
499 /*
500 * Include the controller properties (changeable items)
501 */
502 struct megasas_ctrl_prop properties;
503
504 /*
505 * Define FW package version (set in environment variables on an OEM basis)
506 */
507 char package_version[0x60];
508
509 u8 pad[0x800 - 0x6a0];
510
511} __attribute__ ((packed));
512
513/*
514 * ===============================
515 * MegaRAID SAS driver definitions
516 * ===============================
517 */
518#define MEGASAS_MAX_PD_CHANNELS 2
519#define MEGASAS_MAX_LD_CHANNELS 2
520#define MEGASAS_MAX_CHANNELS (MEGASAS_MAX_PD_CHANNELS + \
521 MEGASAS_MAX_LD_CHANNELS)
522#define MEGASAS_MAX_DEV_PER_CHANNEL 128
523#define MEGASAS_DEFAULT_INIT_ID -1
524#define MEGASAS_MAX_LUN 8
525#define MEGASAS_MAX_LD 64
526
527/*
528 * When SCSI mid-layer calls driver's reset routine, driver waits for
529 * MEGASAS_RESET_WAIT_TIME seconds for all outstanding IO to complete. Note
530 * that the driver cannot _actually_ abort or reset pending commands. While
531 * it is waiting for the commands to complete, it prints a diagnostic message
532 * every MEGASAS_RESET_NOTICE_INTERVAL seconds
533 */
534#define MEGASAS_RESET_WAIT_TIME 180
535#define MEGASAS_RESET_NOTICE_INTERVAL 5
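
A simplified sketch of the waiting behaviour described above, assuming a loop counter i, the adapter instance pointer, and the fw_outstanding counter declared later in this header (the driver's actual reset handler lives outside this hunk and may differ):

        /* Sketch only: wait for FW to drain, printing a notice periodically. */
        for (i = 0; i < MEGASAS_RESET_WAIT_TIME; i++) {
                if (!instance->fw_outstanding)
                        break;
                if (!(i % MEGASAS_RESET_NOTICE_INTERVAL))
                        printk(KERN_NOTICE "megasas: %u cmds still outstanding\n",
                               instance->fw_outstanding);
                msleep(1000);
        }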
536
537#define MEGASAS_IOCTL_CMD 0
538
539/*
540 * FW reports the maximum number of commands that it can accept (maximum
541 * commands that can be outstanding) at any time. The driver must report a
542 * lower number to the mid layer because it can issue a few internal commands
543 * itself (e.g., AEN, abort cmd, IOCTLs, etc.). The number of commands it needs
544 * is shown below
545 */
546#define MEGASAS_INT_CMDS 32
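
Concretely, this means the queue depth advertised to the mid-layer is the firmware limit minus this reserve; a hedged sketch (the actual assignment lives in the I/O-attach path, outside this hunk):

        /* Sketch only: keep MEGASAS_INT_CMDS slots back for internal commands. */
        host->can_queue = instance->max_fw_cmds - MEGASAS_INT_CMDS;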
547
548/*
549 * FW can accept both 32 and 64 bit SGLs. We want to allocate 32/64 bit
550 * SGLs based on the size of dma_addr_t
551 */
552#define IS_DMA64 (sizeof(dma_addr_t) == 8)
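
A sketch of how IS_DMA64 could steer SGL construction; the helper names below are illustrative only, not the driver's:

        /* Sketch only: pick the SGE flavour matching the DMA mask in use. */
        if (IS_DMA64) {
                frame->io.flags |= MFI_FRAME_SGL64;
                sge_count = make_sgl64(instance, scp, &frame->io.sgl);  /* hypothetical */
        } else {
                sge_count = make_sgl32(instance, scp, &frame->io.sgl);  /* hypothetical */
        }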
553
554#define MFI_OB_INTR_STATUS_MASK 0x00000002
555#define MFI_POLL_TIMEOUT_SECS 10
556
557struct megasas_register_set {
558
559 u32 reserved_0[4]; /*0000h */
560
561 u32 inbound_msg_0; /*0010h */
562 u32 inbound_msg_1; /*0014h */
563 u32 outbound_msg_0; /*0018h */
564 u32 outbound_msg_1; /*001Ch */
565
566 u32 inbound_doorbell; /*0020h */
567 u32 inbound_intr_status; /*0024h */
568 u32 inbound_intr_mask; /*0028h */
569
570 u32 outbound_doorbell; /*002Ch */
571 u32 outbound_intr_status; /*0030h */
572 u32 outbound_intr_mask; /*0034h */
573
574 u32 reserved_1[2]; /*0038h */
575
576 u32 inbound_queue_port; /*0040h */
577 u32 outbound_queue_port; /*0044h */
578
579 u32 reserved_2; /*004Ch */
580
581 u32 index_registers[1004]; /*0050h */
582
583} __attribute__ ((packed));
584
585struct megasas_sge32 {
586
587 u32 phys_addr;
588 u32 length;
589
590} __attribute__ ((packed));
591
592struct megasas_sge64 {
593
594 u64 phys_addr;
595 u32 length;
596
597} __attribute__ ((packed));
598
599union megasas_sgl {
600
601 struct megasas_sge32 sge32[1];
602 struct megasas_sge64 sge64[1];
603
604} __attribute__ ((packed));
605
606struct megasas_header {
607
608 u8 cmd; /*00h */
609 u8 sense_len; /*01h */
610 u8 cmd_status; /*02h */
611 u8 scsi_status; /*03h */
612
613 u8 target_id; /*04h */
614 u8 lun; /*05h */
615 u8 cdb_len; /*06h */
616 u8 sge_count; /*07h */
617
618 u32 context; /*08h */
619 u32 pad_0; /*0Ch */
620
621 u16 flags; /*10h */
622 u16 timeout; /*12h */
623 u32 data_xferlen; /*14h */
624
625} __attribute__ ((packed));
626
627union megasas_sgl_frame {
628
629 struct megasas_sge32 sge32[8];
630 struct megasas_sge64 sge64[5];
631
632} __attribute__ ((packed));
633
634struct megasas_init_frame {
635
636 u8 cmd; /*00h */
637 u8 reserved_0; /*01h */
638 u8 cmd_status; /*02h */
639
640 u8 reserved_1; /*03h */
641 u32 reserved_2; /*04h */
642
643 u32 context; /*08h */
644 u32 pad_0; /*0Ch */
645
646 u16 flags; /*10h */
647 u16 reserved_3; /*12h */
648 u32 data_xfer_len; /*14h */
649
650 u32 queue_info_new_phys_addr_lo; /*18h */
651 u32 queue_info_new_phys_addr_hi; /*1Ch */
652 u32 queue_info_old_phys_addr_lo; /*20h */
653 u32 queue_info_old_phys_addr_hi; /*24h */
654
655 u32 reserved_4[6]; /*28h */
656
657} __attribute__ ((packed));
658
659struct megasas_init_queue_info {
660
661 u32 init_flags; /*00h */
662 u32 reply_queue_entries; /*04h */
663
664 u32 reply_queue_start_phys_addr_lo; /*08h */
665 u32 reply_queue_start_phys_addr_hi; /*0Ch */
666 u32 producer_index_phys_addr_lo; /*10h */
667 u32 producer_index_phys_addr_hi; /*14h */
668 u32 consumer_index_phys_addr_lo; /*18h */
669 u32 consumer_index_phys_addr_hi; /*1Ch */
670
671} __attribute__ ((packed));
672
673struct megasas_io_frame {
674
675 u8 cmd; /*00h */
676 u8 sense_len; /*01h */
677 u8 cmd_status; /*02h */
678 u8 scsi_status; /*03h */
679
680 u8 target_id; /*04h */
681 u8 access_byte; /*05h */
682 u8 reserved_0; /*06h */
683 u8 sge_count; /*07h */
684
685 u32 context; /*08h */
686 u32 pad_0; /*0Ch */
687
688 u16 flags; /*10h */
689 u16 timeout; /*12h */
690 u32 lba_count; /*14h */
691
692 u32 sense_buf_phys_addr_lo; /*18h */
693 u32 sense_buf_phys_addr_hi; /*1Ch */
694
695 u32 start_lba_lo; /*20h */
696 u32 start_lba_hi; /*24h */
697
698 union megasas_sgl sgl; /*28h */
699
700} __attribute__ ((packed));
701
702struct megasas_pthru_frame {
703
704 u8 cmd; /*00h */
705 u8 sense_len; /*01h */
706 u8 cmd_status; /*02h */
707 u8 scsi_status; /*03h */
708
709 u8 target_id; /*04h */
710 u8 lun; /*05h */
711 u8 cdb_len; /*06h */
712 u8 sge_count; /*07h */
713
714 u32 context; /*08h */
715 u32 pad_0; /*0Ch */
716
717 u16 flags; /*10h */
718 u16 timeout; /*12h */
719 u32 data_xfer_len; /*14h */
720
721 u32 sense_buf_phys_addr_lo; /*18h */
722 u32 sense_buf_phys_addr_hi; /*1Ch */
723
724 u8 cdb[16]; /*20h */
725 union megasas_sgl sgl; /*30h */
726
727} __attribute__ ((packed));
728
729struct megasas_dcmd_frame {
730
731 u8 cmd; /*00h */
732 u8 reserved_0; /*01h */
733 u8 cmd_status; /*02h */
734 u8 reserved_1[4]; /*03h */
735 u8 sge_count; /*07h */
736
737 u32 context; /*08h */
738 u32 pad_0; /*0Ch */
739
740 u16 flags; /*10h */
741 u16 timeout; /*12h */
742
743 u32 data_xfer_len; /*14h */
744 u32 opcode; /*18h */
745
746 union { /*1Ch */
747 u8 b[12];
748 u16 s[6];
749 u32 w[3];
750 } mbox;
751
752 union megasas_sgl sgl; /*28h */
753
754} __attribute__ ((packed));
755
756struct megasas_abort_frame {
757
758 u8 cmd; /*00h */
759 u8 reserved_0; /*01h */
760 u8 cmd_status; /*02h */
761
762 u8 reserved_1; /*03h */
763 u32 reserved_2; /*04h */
764
765 u32 context; /*08h */
766 u32 pad_0; /*0Ch */
767
768 u16 flags; /*10h */
769 u16 reserved_3; /*12h */
770 u32 reserved_4; /*14h */
771
772 u32 abort_context; /*18h */
773 u32 pad_1; /*1Ch */
774
775 u32 abort_mfi_phys_addr_lo; /*20h */
776 u32 abort_mfi_phys_addr_hi; /*24h */
777
778 u32 reserved_5[6]; /*28h */
779
780} __attribute__ ((packed));
781
782struct megasas_smp_frame {
783
784 u8 cmd; /*00h */
785 u8 reserved_1; /*01h */
786 u8 cmd_status; /*02h */
787 u8 connection_status; /*03h */
788
789 u8 reserved_2[3]; /*04h */
790 u8 sge_count; /*07h */
791
792 u32 context; /*08h */
793 u32 pad_0; /*0Ch */
794
795 u16 flags; /*10h */
796 u16 timeout; /*12h */
797
798 u32 data_xfer_len; /*14h */
799 u64 sas_addr; /*18h */
800
801 union {
802 struct megasas_sge32 sge32[2]; /* [0]: resp [1]: req */
803 struct megasas_sge64 sge64[2]; /* [0]: resp [1]: req */
804 } sgl;
805
806} __attribute__ ((packed));
807
808struct megasas_stp_frame {
809
810 u8 cmd; /*00h */
811 u8 reserved_1; /*01h */
812 u8 cmd_status; /*02h */
813 u8 reserved_2; /*03h */
814
815 u8 target_id; /*04h */
816 u8 reserved_3[2]; /*05h */
817 u8 sge_count; /*07h */
818
819 u32 context; /*08h */
820 u32 pad_0; /*0Ch */
821
822 u16 flags; /*10h */
823 u16 timeout; /*12h */
824
825 u32 data_xfer_len; /*14h */
826
827 u16 fis[10]; /*18h */
828 u32 stp_flags;
829
830 union {
831 struct megasas_sge32 sge32[2]; /* [0]: resp [1]: data */
832 struct megasas_sge64 sge64[2]; /* [0]: resp [1]: data */
833 } sgl;
834
835} __attribute__ ((packed));
836
837union megasas_frame {
838
839 struct megasas_header hdr;
840 struct megasas_init_frame init;
841 struct megasas_io_frame io;
842 struct megasas_pthru_frame pthru;
843 struct megasas_dcmd_frame dcmd;
844 struct megasas_abort_frame abort;
845 struct megasas_smp_frame smp;
846 struct megasas_stp_frame stp;
847
848 u8 raw_bytes[64];
849};
850
851struct megasas_cmd;
852
853union megasas_evt_class_locale {
854
855 struct {
856 u16 locale;
857 u8 reserved;
858 s8 class;
859 } __attribute__ ((packed)) members;
860
861 u32 word;
862
863} __attribute__ ((packed));
864
865struct megasas_evt_log_info {
866 u32 newest_seq_num;
867 u32 oldest_seq_num;
868 u32 clear_seq_num;
869 u32 shutdown_seq_num;
870 u32 boot_seq_num;
871
872} __attribute__ ((packed));
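
Together with the class/locale union above, this structure suggests the shape of AEN setup: query the newest sequence number via MR_DCMD_CTRL_EVENT_GET_INFO, then register for everything from that point on. A hedged sketch, reusing megasas_register_aen() from the .c file:

        /* Sketch only: listen on all locales for CRITICAL-class events and above. */
        union megasas_evt_class_locale cl;
        struct megasas_evt_log_info eli;        /* assumed filled by a GET_INFO DCMD */

        cl.members.reserved = 0;
        cl.members.locale = MR_EVT_LOCALE_ALL;
        cl.members.class = MR_EVT_CLASS_CRITICAL;

        megasas_register_aen(instance, eli.newest_seq_num + 1, cl.word);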
873
874struct megasas_progress {
875
876 u16 progress;
877 u16 elapsed_seconds;
878
879} __attribute__ ((packed));
880
881struct megasas_evtarg_ld {
882
883 u16 target_id;
884 u8 ld_index;
885 u8 reserved;
886
887} __attribute__ ((packed));
888
889struct megasas_evtarg_pd {
890 u16 device_id;
891 u8 encl_index;
892 u8 slot_number;
893
894} __attribute__ ((packed));
895
896struct megasas_evt_detail {
897
898 u32 seq_num;
899 u32 time_stamp;
900 u32 code;
901 union megasas_evt_class_locale cl;
902 u8 arg_type;
903 u8 reserved1[15];
904
905 union {
906 struct {
907 struct megasas_evtarg_pd pd;
908 u8 cdb_length;
909 u8 sense_length;
910 u8 reserved[2];
911 u8 cdb[16];
912 u8 sense[64];
913 } __attribute__ ((packed)) cdbSense;
914
915 struct megasas_evtarg_ld ld;
916
917 struct {
918 struct megasas_evtarg_ld ld;
919 u64 count;
920 } __attribute__ ((packed)) ld_count;
921
922 struct {
923 u64 lba;
924 struct megasas_evtarg_ld ld;
925 } __attribute__ ((packed)) ld_lba;
926
927 struct {
928 struct megasas_evtarg_ld ld;
929 u32 prevOwner;
930 u32 newOwner;
931 } __attribute__ ((packed)) ld_owner;
932
933 struct {
934 u64 ld_lba;
935 u64 pd_lba;
936 struct megasas_evtarg_ld ld;
937 struct megasas_evtarg_pd pd;
938 } __attribute__ ((packed)) ld_lba_pd_lba;
939
940 struct {
941 struct megasas_evtarg_ld ld;
942 struct megasas_progress prog;
943 } __attribute__ ((packed)) ld_prog;
944
945 struct {
946 struct megasas_evtarg_ld ld;
947 u32 prev_state;
948 u32 new_state;
949 } __attribute__ ((packed)) ld_state;
950
951 struct {
952 u64 strip;
953 struct megasas_evtarg_ld ld;
954 } __attribute__ ((packed)) ld_strip;
955
956 struct megasas_evtarg_pd pd;
957
958 struct {
959 struct megasas_evtarg_pd pd;
960 u32 err;
961 } __attribute__ ((packed)) pd_err;
962
963 struct {
964 u64 lba;
965 struct megasas_evtarg_pd pd;
966 } __attribute__ ((packed)) pd_lba;
967
968 struct {
969 u64 lba;
970 struct megasas_evtarg_pd pd;
971 struct megasas_evtarg_ld ld;
972 } __attribute__ ((packed)) pd_lba_ld;
973
974 struct {
975 struct megasas_evtarg_pd pd;
976 struct megasas_progress prog;
977 } __attribute__ ((packed)) pd_prog;
978
979 struct {
980 struct megasas_evtarg_pd pd;
981 u32 prevState;
982 u32 newState;
983 } __attribute__ ((packed)) pd_state;
984
985 struct {
986 u16 vendorId;
987 u16 deviceId;
988 u16 subVendorId;
989 u16 subDeviceId;
990 } __attribute__ ((packed)) pci;
991
992 u32 rate;
993 char str[96];
994
995 struct {
996 u32 rtc;
997 u32 elapsedSeconds;
998 } __attribute__ ((packed)) time;
999
1000 struct {
1001 u32 ecar;
1002 u32 elog;
1003 char str[64];
1004 } __attribute__ ((packed)) ecc;
1005
1006 u8 b[96];
1007 u16 s[48];
1008 u32 w[24];
1009 u64 d[12];
1010 } args;
1011
1012 char description[128];
1013
1014} __attribute__ ((packed));
1015
1016struct megasas_instance {
1017
1018 u32 *producer;
1019 dma_addr_t producer_h;
1020 u32 *consumer;
1021 dma_addr_t consumer_h;
1022
1023 u32 *reply_queue;
1024 dma_addr_t reply_queue_h;
1025
1026 unsigned long base_addr;
1027 struct megasas_register_set __iomem *reg_set;
1028
1029 s8 init_id;
1030 u8 reserved[3];
1031
1032 u16 max_num_sge;
1033 u16 max_fw_cmds;
1034 u32 max_sectors_per_req;
1035
1036 struct megasas_cmd **cmd_list;
1037 struct list_head cmd_pool;
1038 spinlock_t cmd_pool_lock;
1039 struct dma_pool *frame_dma_pool;
1040 struct dma_pool *sense_dma_pool;
1041
1042 struct megasas_evt_detail *evt_detail;
1043 dma_addr_t evt_detail_h;
1044 struct megasas_cmd *aen_cmd;
1045 struct semaphore aen_mutex;
1046 struct semaphore ioctl_sem;
1047
1048 struct Scsi_Host *host;
1049
1050 wait_queue_head_t int_cmd_wait_q;
1051 wait_queue_head_t abort_cmd_wait_q;
1052
1053 struct pci_dev *pdev;
1054 u32 unique_id;
1055
1056 u32 fw_outstanding;
1057 u32 hw_crit_error;
1058 spinlock_t instance_lock;
1059};
1060
1061#define MEGASAS_IS_LOGICAL(scp) \
1062 (scp->device->channel < MEGASAS_MAX_PD_CHANNELS) ? 0 : 1
1063
1064#define MEGASAS_DEV_INDEX(inst, scp) \
1065 ((scp->device->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL) + \
1066 scp->device->id
1067
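A sketch of how the two macros above might be used when building a frame for a command, with scp being the struct scsi_cmnd (note the macros expand without outer parentheses, so parenthesize at the call site when embedding them in larger expressions):

        /* Sketch only: logical drives get LD I/O frames, the rest pass-through. */
        if (MEGASAS_IS_LOGICAL(scp))
                frame->io.target_id = MEGASAS_DEV_INDEX(instance, scp);
        else
                frame->pthru.target_id = scp->device->id;       /* assumed routing */
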
1068struct megasas_cmd {
1069
1070 union megasas_frame *frame;
1071 dma_addr_t frame_phys_addr;
1072 u8 *sense;
1073 dma_addr_t sense_phys_addr;
1074
1075 u32 index;
1076 u8 sync_cmd;
1077 u8 cmd_status;
1078 u16 abort_aen;
1079
1080 struct list_head list;
1081 struct scsi_cmnd *scmd;
1082 struct megasas_instance *instance;
1083 u32 frame_count;
1084};
1085
1086#define MAX_MGMT_ADAPTERS 1024
1087#define MAX_IOCTL_SGE 16
1088
1089struct megasas_iocpacket {
1090
1091 u16 host_no;
1092 u16 __pad1;
1093 u32 sgl_off;
1094 u32 sge_count;
1095 u32 sense_off;
1096 u32 sense_len;
1097 union {
1098 u8 raw[128];
1099 struct megasas_header hdr;
1100 } frame;
1101
1102 struct iovec sgl[MAX_IOCTL_SGE];
1103
1104} __attribute__ ((packed));
1105
1106struct megasas_aen {
1107 u16 host_no;
1108 u16 __pad1;
1109 u32 seq_num;
1110 u32 class_locale_word;
1111} __attribute__ ((packed));
1112
1113#ifdef CONFIG_COMPAT
1114struct compat_megasas_iocpacket {
1115 u16 host_no;
1116 u16 __pad1;
1117 u32 sgl_off;
1118 u32 sge_count;
1119 u32 sense_off;
1120 u32 sense_len;
1121 union {
1122 u8 raw[128];
1123 struct megasas_header hdr;
1124 } frame;
1125 struct compat_iovec sgl[MAX_IOCTL_SGE];
1126} __attribute__ ((packed));
1127
1128#define MEGASAS_IOC_FIRMWARE _IOWR('M', 1, struct compat_megasas_iocpacket)
1129#else
1130#define MEGASAS_IOC_FIRMWARE _IOWR('M', 1, struct megasas_iocpacket)
1131#endif
1132
1133#define MEGASAS_IOC_GET_AEN _IOW('M', 3, struct megasas_aen)
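
For completeness, a user-space sketch of driving the AEN ioctl, assuming this header's struct megasas_aen and ioctl numbers have been made visible to the application and that a device node exists for the dynamically assigned char major:

        /* Sketch only: ask the driver to report events newer than seq_num. */
        #include <string.h>
        #include <sys/ioctl.h>

        static int wait_for_aen(int fd, unsigned int host_no, unsigned int seq_num,
                                unsigned int cl_word)
        {
                struct megasas_aen aen;

                memset(&aen, 0, sizeof(aen));
                aen.host_no = host_no;
                aen.seq_num = seq_num;
                aen.class_locale_word = cl_word;

                return ioctl(fd, MEGASAS_IOC_GET_AEN, &aen);
        }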
1134
1135struct megasas_mgmt_info {
1136
1137 u16 count;
1138 struct megasas_instance *instance[MAX_MGMT_ADAPTERS];
1139 int max_index;
1140};
1141
1142#endif /*LSI_MEGARAID_SAS_H */
diff --git a/drivers/scsi/mesh.c b/drivers/scsi/mesh.c
index a4857db4f9b8..b235556b7b65 100644
--- a/drivers/scsi/mesh.c
+++ b/drivers/scsi/mesh.c
@@ -1959,22 +1959,35 @@ static int mesh_probe(struct macio_dev *mdev, const struct of_device_id *match)
1959 /* Set it up */ 1959 /* Set it up */
1960 mesh_init(ms); 1960 mesh_init(ms);
1961 1961
1962 /* XXX FIXME: error should be fatal */ 1962 /* Request interrupt */
1963 if (request_irq(ms->meshintr, do_mesh_interrupt, 0, "MESH", ms)) 1963 if (request_irq(ms->meshintr, do_mesh_interrupt, 0, "MESH", ms)) {
1964 printk(KERN_ERR "MESH: can't get irq %d\n", ms->meshintr); 1964 printk(KERN_ERR "MESH: can't get irq %d\n", ms->meshintr);
1965 goto out_shutdown;
1966 }
1965 1967
1966 /* XXX FIXME: handle failure */ 1968 /* Add scsi host & scan */
1967 scsi_add_host(mesh_host, &mdev->ofdev.dev); 1969 if (scsi_add_host(mesh_host, &mdev->ofdev.dev))
1970 goto out_release_irq;
1968 scsi_scan_host(mesh_host); 1971 scsi_scan_host(mesh_host);
1969 1972
1970 return 0; 1973 return 0;
1971 1974
1972out_unmap: 1975 out_release_irq:
1976 free_irq(ms->meshintr, ms);
1977 out_shutdown:
1978 /* shutdown & reset the bus in case of error, or MacOS can be confused
1979 * at reboot if the bus was set to synchronous mode already
1980 */
1981 mesh_shutdown(mdev);
1982 set_mesh_power(ms, 0);
1983 pci_free_consistent(macio_get_pci_dev(mdev), ms->dma_cmd_size,
1984 ms->dma_cmd_space, ms->dma_cmd_bus);
1985 out_unmap:
1973 iounmap(ms->dma); 1986 iounmap(ms->dma);
1974 iounmap(ms->mesh); 1987 iounmap(ms->mesh);
1975out_free: 1988 out_free:
1976 scsi_host_put(mesh_host); 1989 scsi_host_put(mesh_host);
1977out_release: 1990 out_release:
1978 macio_release_resources(mdev); 1991 macio_release_resources(mdev);
1979 1992
1980 return -ENODEV; 1993 return -ENODEV;
@@ -2001,7 +2014,7 @@ static int mesh_remove(struct macio_dev *mdev)
2001 2014
2002 /* Free DMA commands memory */ 2015 /* Free DMA commands memory */
2003 pci_free_consistent(macio_get_pci_dev(mdev), ms->dma_cmd_size, 2016 pci_free_consistent(macio_get_pci_dev(mdev), ms->dma_cmd_size,
2004 ms->dma_cmd_space, ms->dma_cmd_bus); 2017 ms->dma_cmd_space, ms->dma_cmd_bus);
2005 2018
2006 /* Release memory resources */ 2019 /* Release memory resources */
2007 macio_release_resources(mdev); 2020 macio_release_resources(mdev);
diff --git a/drivers/scsi/qla2xxx/qla_rscn.c b/drivers/scsi/qla2xxx/qla_rscn.c
index bdc3bc74bbe1..1eba98828636 100644
--- a/drivers/scsi/qla2xxx/qla_rscn.c
+++ b/drivers/scsi/qla2xxx/qla_rscn.c
@@ -330,6 +330,8 @@ qla2x00_update_login_fcport(scsi_qla_host_t *ha, struct mbx_entry *mbxstat,
330 fcport->flags &= ~FCF_FAILOVER_NEEDED; 330 fcport->flags &= ~FCF_FAILOVER_NEEDED;
331 fcport->iodesc_idx_sent = IODESC_INVALID_INDEX; 331 fcport->iodesc_idx_sent = IODESC_INVALID_INDEX;
332 atomic_set(&fcport->state, FCS_ONLINE); 332 atomic_set(&fcport->state, FCS_ONLINE);
333 if (fcport->rport)
334 fc_remote_port_unblock(fcport->rport);
333} 335}
334 336
335 337
diff --git a/drivers/scsi/sata_nv.c b/drivers/scsi/sata_nv.c
index a1d62dee3be6..c05653c7779d 100644
--- a/drivers/scsi/sata_nv.c
+++ b/drivers/scsi/sata_nv.c
@@ -158,6 +158,8 @@ static struct pci_device_id nv_pci_tbl[] = {
158 PCI_ANY_ID, PCI_ANY_ID, 0, 0, MCP51 }, 158 PCI_ANY_ID, PCI_ANY_ID, 0, 0, MCP51 },
159 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA, 159 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA,
160 PCI_ANY_ID, PCI_ANY_ID, 0, 0, MCP55 }, 160 PCI_ANY_ID, PCI_ANY_ID, 0, 0, MCP55 },
161 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2,
162 PCI_ANY_ID, PCI_ANY_ID, 0, 0, MCP55 },
161 { PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, 163 { PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
162 PCI_ANY_ID, PCI_ANY_ID, 164 PCI_ANY_ID, PCI_ANY_ID,
163 PCI_CLASS_STORAGE_IDE<<8, 0xffff00, GENERIC }, 165 PCI_CLASS_STORAGE_IDE<<8, 0xffff00, GENERIC },
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index fcf9f6cbb142..327c5d7e5bd2 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -587,6 +587,7 @@ static int scsi_probe_lun(struct scsi_device *sdev, char *inq_result,
587 if (sdev->scsi_level >= 2 || 587 if (sdev->scsi_level >= 2 ||
588 (sdev->scsi_level == 1 && (inq_result[3] & 0x0f) == 1)) 588 (sdev->scsi_level == 1 && (inq_result[3] & 0x0f) == 1))
589 sdev->scsi_level++; 589 sdev->scsi_level++;
590 sdev->sdev_target->scsi_level = sdev->scsi_level;
590 591
591 return 0; 592 return 0;
592} 593}
@@ -771,6 +772,15 @@ static int scsi_add_lun(struct scsi_device *sdev, char *inq_result, int *bflags)
771 return SCSI_SCAN_LUN_PRESENT; 772 return SCSI_SCAN_LUN_PRESENT;
772} 773}
773 774
775static inline void scsi_destroy_sdev(struct scsi_device *sdev)
776{
777 if (sdev->host->hostt->slave_destroy)
778 sdev->host->hostt->slave_destroy(sdev);
779 transport_destroy_device(&sdev->sdev_gendev);
780 put_device(&sdev->sdev_gendev);
781}
782
783
774/** 784/**
775 * scsi_probe_and_add_lun - probe a LUN, if a LUN is found add it 785 * scsi_probe_and_add_lun - probe a LUN, if a LUN is found add it
776 * @starget: pointer to target device structure 786 * @starget: pointer to target device structure
@@ -803,9 +813,9 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget,
803 * The rescan flag is used as an optimization, the first scan of a 813 * The rescan flag is used as an optimization, the first scan of a
804 * host adapter calls into here with rescan == 0. 814 * host adapter calls into here with rescan == 0.
805 */ 815 */
806 if (rescan) { 816 sdev = scsi_device_lookup_by_target(starget, lun);
807 sdev = scsi_device_lookup_by_target(starget, lun); 817 if (sdev) {
808 if (sdev) { 818 if (rescan || sdev->sdev_state != SDEV_CREATED) {
809 SCSI_LOG_SCAN_BUS(3, printk(KERN_INFO 819 SCSI_LOG_SCAN_BUS(3, printk(KERN_INFO
810 "scsi scan: device exists on %s\n", 820 "scsi scan: device exists on %s\n",
811 sdev->sdev_gendev.bus_id)); 821 sdev->sdev_gendev.bus_id));
@@ -820,9 +830,9 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget,
820 sdev->model); 830 sdev->model);
821 return SCSI_SCAN_LUN_PRESENT; 831 return SCSI_SCAN_LUN_PRESENT;
822 } 832 }
823 } 833 scsi_device_put(sdev);
824 834 } else
825 sdev = scsi_alloc_sdev(starget, lun, hostdata); 835 sdev = scsi_alloc_sdev(starget, lun, hostdata);
826 if (!sdev) 836 if (!sdev)
827 goto out; 837 goto out;
828 838
@@ -877,12 +887,8 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget,
877 res = SCSI_SCAN_NO_RESPONSE; 887 res = SCSI_SCAN_NO_RESPONSE;
878 } 888 }
879 } 889 }
880 } else { 890 } else
881 if (sdev->host->hostt->slave_destroy) 891 scsi_destroy_sdev(sdev);
882 sdev->host->hostt->slave_destroy(sdev);
883 transport_destroy_device(&sdev->sdev_gendev);
884 put_device(&sdev->sdev_gendev);
885 }
886 out: 892 out:
887 return res; 893 return res;
888} 894}
@@ -1054,7 +1060,7 @@ EXPORT_SYMBOL(int_to_scsilun);
1054 * 0: scan completed (or no memory, so further scanning is futile) 1060 * 0: scan completed (or no memory, so further scanning is futile)
1055 * 1: no report lun scan, or not configured 1061 * 1: no report lun scan, or not configured
1056 **/ 1062 **/
1057static int scsi_report_lun_scan(struct scsi_device *sdev, int bflags, 1063static int scsi_report_lun_scan(struct scsi_target *starget, int bflags,
1058 int rescan) 1064 int rescan)
1059{ 1065{
1060 char devname[64]; 1066 char devname[64];
@@ -1067,7 +1073,8 @@ static int scsi_report_lun_scan(struct scsi_device *sdev, int bflags,
1067 struct scsi_lun *lunp, *lun_data; 1073 struct scsi_lun *lunp, *lun_data;
1068 u8 *data; 1074 u8 *data;
1069 struct scsi_sense_hdr sshdr; 1075 struct scsi_sense_hdr sshdr;
1070 struct scsi_target *starget = scsi_target(sdev); 1076 struct scsi_device *sdev;
1077 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
1071 1078
1072 /* 1079 /*
1073 * Only support SCSI-3 and up devices if BLIST_NOREPORTLUN is not set. 1080 * Only support SCSI-3 and up devices if BLIST_NOREPORTLUN is not set.
@@ -1075,15 +1082,23 @@ static int scsi_report_lun_scan(struct scsi_device *sdev, int bflags,
1075 * support more than 8 LUNs. 1082 * support more than 8 LUNs.
1076 */ 1083 */
1077 if ((bflags & BLIST_NOREPORTLUN) || 1084 if ((bflags & BLIST_NOREPORTLUN) ||
1078 sdev->scsi_level < SCSI_2 || 1085 starget->scsi_level < SCSI_2 ||
1079 (sdev->scsi_level < SCSI_3 && 1086 (starget->scsi_level < SCSI_3 &&
1080 (!(bflags & BLIST_REPORTLUN2) || sdev->host->max_lun <= 8)) ) 1087 (!(bflags & BLIST_REPORTLUN2) || shost->max_lun <= 8)) )
1081 return 1; 1088 return 1;
1082 if (bflags & BLIST_NOLUN) 1089 if (bflags & BLIST_NOLUN)
1083 return 0; 1090 return 0;
1084 1091
1092 if (!(sdev = scsi_device_lookup_by_target(starget, 0))) {
1093 sdev = scsi_alloc_sdev(starget, 0, NULL);
1094 if (!sdev)
1095 return 0;
1096 if (scsi_device_get(sdev))
1097 return 0;
1098 }
1099
1085 sprintf(devname, "host %d channel %d id %d", 1100 sprintf(devname, "host %d channel %d id %d",
1086 sdev->host->host_no, sdev->channel, sdev->id); 1101 shost->host_no, sdev->channel, sdev->id);
1087 1102
1088 /* 1103 /*
1089 * Allocate enough to hold the header (the same size as one scsi_lun) 1104 * Allocate enough to hold the header (the same size as one scsi_lun)
@@ -1098,8 +1113,10 @@ static int scsi_report_lun_scan(struct scsi_device *sdev, int bflags,
1098 length = (max_scsi_report_luns + 1) * sizeof(struct scsi_lun); 1113 length = (max_scsi_report_luns + 1) * sizeof(struct scsi_lun);
1099 lun_data = kmalloc(length, GFP_ATOMIC | 1114 lun_data = kmalloc(length, GFP_ATOMIC |
1100 (sdev->host->unchecked_isa_dma ? __GFP_DMA : 0)); 1115 (sdev->host->unchecked_isa_dma ? __GFP_DMA : 0));
1101 if (!lun_data) 1116 if (!lun_data) {
1117 printk(ALLOC_FAILURE_MSG, __FUNCTION__);
1102 goto out; 1118 goto out;
1119 }
1103 1120
1104 scsi_cmd[0] = REPORT_LUNS; 1121 scsi_cmd[0] = REPORT_LUNS;
1105 1122
@@ -1201,10 +1218,6 @@ static int scsi_report_lun_scan(struct scsi_device *sdev, int bflags,
1201 for (i = 0; i < sizeof(struct scsi_lun); i++) 1218 for (i = 0; i < sizeof(struct scsi_lun); i++)
1202 printk("%02x", data[i]); 1219 printk("%02x", data[i]);
1203 printk(" has a LUN larger than currently supported.\n"); 1220 printk(" has a LUN larger than currently supported.\n");
1204 } else if (lun == 0) {
1205 /*
1206 * LUN 0 has already been scanned.
1207 */
1208 } else if (lun > sdev->host->max_lun) { 1221 } else if (lun > sdev->host->max_lun) {
1209 printk(KERN_WARNING "scsi: %s lun%d has a LUN larger" 1222 printk(KERN_WARNING "scsi: %s lun%d has a LUN larger"
1210 " than allowed by the host adapter\n", 1223 " than allowed by the host adapter\n",
@@ -1227,13 +1240,13 @@ static int scsi_report_lun_scan(struct scsi_device *sdev, int bflags,
1227 } 1240 }
1228 1241
1229 kfree(lun_data); 1242 kfree(lun_data);
1230 return 0;
1231
1232 out: 1243 out:
1233 /* 1244 scsi_device_put(sdev);
1234 * We are out of memory, don't try scanning any further. 1245 if (sdev->sdev_state == SDEV_CREATED)
1235 */ 1246 /*
1236 printk(ALLOC_FAILURE_MSG, __FUNCTION__); 1247 * the sdev we used didn't appear in the report luns scan
1248 */
1249 scsi_destroy_sdev(sdev);
1237 return 0; 1250 return 0;
1238} 1251}
1239 1252
@@ -1299,7 +1312,6 @@ static void __scsi_scan_target(struct device *parent, unsigned int channel,
1299 struct Scsi_Host *shost = dev_to_shost(parent); 1312 struct Scsi_Host *shost = dev_to_shost(parent);
1300 int bflags = 0; 1313 int bflags = 0;
1301 int res; 1314 int res;
1302 struct scsi_device *sdev = NULL;
1303 struct scsi_target *starget; 1315 struct scsi_target *starget;
1304 1316
1305 if (shost->this_id == id) 1317 if (shost->this_id == id)
@@ -1325,27 +1337,16 @@ static void __scsi_scan_target(struct device *parent, unsigned int channel,
1325 * Scan LUN 0, if there is some response, scan further. Ideally, we 1337 * Scan LUN 0, if there is some response, scan further. Ideally, we
1326 * would not configure LUN 0 until all LUNs are scanned. 1338 * would not configure LUN 0 until all LUNs are scanned.
1327 */ 1339 */
1328 res = scsi_probe_and_add_lun(starget, 0, &bflags, &sdev, rescan, NULL); 1340 res = scsi_probe_and_add_lun(starget, 0, &bflags, NULL, rescan, NULL);
1329 if (res == SCSI_SCAN_LUN_PRESENT) { 1341 if (res == SCSI_SCAN_LUN_PRESENT || res == SCSI_SCAN_TARGET_PRESENT) {
1330 if (scsi_report_lun_scan(sdev, bflags, rescan) != 0) 1342 if (scsi_report_lun_scan(starget, bflags, rescan) != 0)
1331 /* 1343 /*
1332 * The REPORT LUN did not scan the target, 1344 * The REPORT LUN did not scan the target,
1333 * do a sequential scan. 1345 * do a sequential scan.
1334 */ 1346 */
1335 scsi_sequential_lun_scan(starget, bflags, 1347 scsi_sequential_lun_scan(starget, bflags,
1336 res, sdev->scsi_level, rescan); 1348 res, starget->scsi_level, rescan);
1337 } else if (res == SCSI_SCAN_TARGET_PRESENT) {
1338 /*
1339 * There's a target here, but lun 0 is offline so we
1340 * can't use the report_lun scan. Fall back to a
1341 * sequential lun scan with a bflags of SPARSELUN and
1342 * a default scsi level of SCSI_2
1343 */
1344 scsi_sequential_lun_scan(starget, BLIST_SPARSELUN,
1345 SCSI_SCAN_TARGET_PRESENT, SCSI_2, rescan);
1346 } 1349 }
1347 if (sdev)
1348 scsi_device_put(sdev);
1349 1350
1350 out_reap: 1351 out_reap:
1351 /* now determine if the target has any children at all 1352 /* now determine if the target has any children at all
@@ -1542,10 +1543,7 @@ void scsi_free_host_dev(struct scsi_device *sdev)
1542{ 1543{
1543 BUG_ON(sdev->id != sdev->host->this_id); 1544 BUG_ON(sdev->id != sdev->host->this_id);
1544 1545
1545 if (sdev->host->hostt->slave_destroy) 1546 scsi_destroy_sdev(sdev);
1546 sdev->host->hostt->slave_destroy(sdev);
1547 transport_destroy_device(&sdev->sdev_gendev);
1548 put_device(&sdev->sdev_gendev);
1549} 1547}
1550EXPORT_SYMBOL(scsi_free_host_dev); 1548EXPORT_SYMBOL(scsi_free_host_dev);
1551 1549
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index ff724bbe6611..1d145d2f9a38 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -628,17 +628,16 @@ sas_rphy_delete(struct sas_rphy *rphy)
628 struct Scsi_Host *shost = dev_to_shost(parent->dev.parent); 628 struct Scsi_Host *shost = dev_to_shost(parent->dev.parent);
629 struct sas_host_attrs *sas_host = to_sas_host_attrs(shost); 629 struct sas_host_attrs *sas_host = to_sas_host_attrs(shost);
630 630
631 transport_destroy_device(&rphy->dev); 631 scsi_remove_target(dev);
632 632
633 scsi_remove_target(&rphy->dev); 633 transport_remove_device(dev);
634 device_del(dev);
635 transport_destroy_device(dev);
634 636
635 spin_lock(&sas_host->lock); 637 spin_lock(&sas_host->lock);
636 list_del(&rphy->list); 638 list_del(&rphy->list);
637 spin_unlock(&sas_host->lock); 639 spin_unlock(&sas_host->lock);
638 640
639 transport_remove_device(dev);
640 device_del(dev);
641 transport_destroy_device(dev);
642 put_device(&parent->dev); 641 put_device(&parent->dev);
643} 642}
644EXPORT_SYMBOL(sas_rphy_delete); 643EXPORT_SYMBOL(sas_rphy_delete);
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 4d09a6e4dd2e..ad94367df430 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -2849,8 +2849,7 @@ sg_proc_init(void)
2849 struct proc_dir_entry *pdep; 2849 struct proc_dir_entry *pdep;
2850 struct sg_proc_leaf * leaf; 2850 struct sg_proc_leaf * leaf;
2851 2851
2852 sg_proc_sgp = create_proc_entry(sg_proc_sg_dirname, 2852 sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
2853 S_IFDIR | S_IRUGO | S_IXUGO, NULL);
2854 if (!sg_proc_sgp) 2853 if (!sg_proc_sgp)
2855 return 1; 2854 return 1;
2856 for (k = 0; k < num_leaves; ++k) { 2855 for (k = 0; k < num_leaves; ++k) {
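The sg.c hunk above (like the vicam.c one further down) drops create_proc_entry(name, S_IFDIR | ..., NULL) in favour of proc_mkdir(). A minimal, hedged sketch of that same pattern in a standalone module follows; the directory name and module names are hypothetical and only illustrate the 2.6-era procfs calls, they are not part of the patch.

/*
 * Sketch of the proc_mkdir() pattern used in the hunk above.
 * "example_dirname" and the init/exit names are illustrative only.
 */
#include <linux/module.h>
#include <linux/proc_fs.h>

static struct proc_dir_entry *example_proc_dir;

static int __init example_proc_init(void)
{
	/* Directories are created with proc_mkdir(), not create_proc_entry(). */
	example_proc_dir = proc_mkdir("example_dirname", NULL);
	if (!example_proc_dir)
		return -ENOMEM;
	return 0;
}

static void __exit example_proc_exit(void)
{
	remove_proc_entry("example_dirname", NULL);
}

module_init(example_proc_init);
module_exit(example_proc_exit);
MODULE_LICENSE("GPL");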
diff --git a/drivers/serial/clps711x.c b/drivers/serial/clps711x.c
index 78c1f36ad9b7..87ef368384fb 100644
--- a/drivers/serial/clps711x.c
+++ b/drivers/serial/clps711x.c
@@ -98,7 +98,7 @@ static irqreturn_t clps711xuart_int_rx(int irq, void *dev_id, struct pt_regs *re
98{ 98{
99 struct uart_port *port = dev_id; 99 struct uart_port *port = dev_id;
100 struct tty_struct *tty = port->info->tty; 100 struct tty_struct *tty = port->info->tty;
101 unsigned int status, ch, flg, ignored = 0; 101 unsigned int status, ch, flg;
102 102
103 status = clps_readl(SYSFLG(port)); 103 status = clps_readl(SYSFLG(port));
104 while (!(status & SYSFLG_URXFE)) { 104 while (!(status & SYSFLG_URXFE)) {
diff --git a/drivers/serial/imx.c b/drivers/serial/imx.c
index 4c985e6b3784..4e1e80adaf11 100644
--- a/drivers/serial/imx.c
+++ b/drivers/serial/imx.c
@@ -860,7 +860,7 @@ imx_console_setup(struct console *co, char *options)
860 return uart_set_options(&sport->port, co, baud, parity, bits, flow); 860 return uart_set_options(&sport->port, co, baud, parity, bits, flow);
861} 861}
862 862
863extern struct uart_driver imx_reg; 863static struct uart_driver imx_reg;
864static struct console imx_console = { 864static struct console imx_console = {
865 .name = "ttySMX", 865 .name = "ttySMX",
866 .write = imx_console_write, 866 .write = imx_console_write,
diff --git a/drivers/serial/ioc4_serial.c b/drivers/serial/ioc4_serial.c
index 0c5c96a582b3..f88fdd480685 100644
--- a/drivers/serial/ioc4_serial.c
+++ b/drivers/serial/ioc4_serial.c
@@ -973,18 +973,6 @@ static irqreturn_t ioc4_intr(int irq, void *arg, struct pt_regs *regs)
973 this_ir &= ~this_mir; 973 this_ir &= ~this_mir;
974 } 974 }
975 } 975 }
976 if (this_ir) {
977 printk(KERN_ERR
978 "unknown IOC4 %s interrupt 0x%x, sio_ir = 0x%x,"
979 " sio_ies = 0x%x, other_ir = 0x%x :"
980 "other_ies = 0x%x\n",
981 (intr_type == IOC4_SIO_INTR_TYPE) ? "sio" :
982 "other", this_ir,
983 readl(&soft->is_ioc4_misc_addr->sio_ir.raw),
984 readl(&soft->is_ioc4_misc_addr->sio_ies.raw),
985 readl(&soft->is_ioc4_misc_addr->other_ir.raw),
986 readl(&soft->is_ioc4_misc_addr->other_ies.raw));
987 }
988 } 976 }
989#ifdef DEBUG_INTERRUPTS 977#ifdef DEBUG_INTERRUPTS
990 { 978 {
diff --git a/drivers/serial/s3c2410.c b/drivers/serial/s3c2410.c
index c361c6fb0809..50d7870d92bb 100644
--- a/drivers/serial/s3c2410.c
+++ b/drivers/serial/s3c2410.c
@@ -82,8 +82,6 @@
82#include <asm/arch/regs-serial.h> 82#include <asm/arch/regs-serial.h>
83#include <asm/arch/regs-gpio.h> 83#include <asm/arch/regs-gpio.h>
84 84
85#include <asm/mach-types.h>
86
87/* structures */ 85/* structures */
88 86
89struct s3c24xx_uart_info { 87struct s3c24xx_uart_info {
@@ -753,8 +751,8 @@ static void s3c24xx_serial_set_termios(struct uart_port *port,
753{ 751{
754 struct s3c2410_uartcfg *cfg = s3c24xx_port_to_cfg(port); 752 struct s3c2410_uartcfg *cfg = s3c24xx_port_to_cfg(port);
755 struct s3c24xx_uart_port *ourport = to_ourport(port); 753 struct s3c24xx_uart_port *ourport = to_ourport(port);
756 struct s3c24xx_uart_clksrc *clksrc; 754 struct s3c24xx_uart_clksrc *clksrc = NULL;
757 struct clk *clk; 755 struct clk *clk = NULL;
758 unsigned long flags; 756 unsigned long flags;
759 unsigned int baud, quot; 757 unsigned int baud, quot;
760 unsigned int ulcon; 758 unsigned int ulcon;
diff --git a/drivers/serial/serial_cs.c b/drivers/serial/serial_cs.c
index 1ae0b381c162..2c7d3ef76e8e 100644
--- a/drivers/serial/serial_cs.c
+++ b/drivers/serial/serial_cs.c
@@ -859,6 +859,7 @@ static struct pcmcia_device_id serial_ids[] = {
859 PCMCIA_MFC_DEVICE_CIS_MANF_CARD(1, 0x0175, 0x0000, "DP83903.cis"), 859 PCMCIA_MFC_DEVICE_CIS_MANF_CARD(1, 0x0175, 0x0000, "DP83903.cis"),
860 PCMCIA_MFC_DEVICE_CIS_MANF_CARD(1, 0x0101, 0x0035, "3CXEM556.cis"), 860 PCMCIA_MFC_DEVICE_CIS_MANF_CARD(1, 0x0101, 0x0035, "3CXEM556.cis"),
861 PCMCIA_MFC_DEVICE_CIS_MANF_CARD(1, 0x0101, 0x003d, "3CXEM556.cis"), 861 PCMCIA_MFC_DEVICE_CIS_MANF_CARD(1, 0x0101, 0x003d, "3CXEM556.cis"),
862 PCMCIA_DEVICE_CIS_MANF_CARD(0x0192, 0x0710, "SW_7xx_SER.cis"), /* Sierra Wireless AC710/AC750 GPRS Network Adapter R1 */
862 PCMCIA_DEVICE_CIS_PROD_ID12("MultiTech", "PCMCIA 56K DataFax", 0x842047ee, 0xc2efcf03, "MT5634ZLX.cis"), 863 PCMCIA_DEVICE_CIS_PROD_ID12("MultiTech", "PCMCIA 56K DataFax", 0x842047ee, 0xc2efcf03, "MT5634ZLX.cis"),
863 PCMCIA_DEVICE_CIS_PROD_ID12("ADVANTECH", "COMpad-32/85B-4", 0x96913a85, 0xcec8f102, "COMpad4.cis"), 864 PCMCIA_DEVICE_CIS_PROD_ID12("ADVANTECH", "COMpad-32/85B-4", 0x96913a85, 0xcec8f102, "COMpad4.cis"),
864 PCMCIA_DEVICE_CIS_PROD_ID123("ADVANTECH", "COMpad-32/85", "1.0", 0x96913a85, 0x8fbe92ae, 0x0877b627, "COMpad2.cis"), 865 PCMCIA_DEVICE_CIS_PROD_ID123("ADVANTECH", "COMpad-32/85", "1.0", 0x96913a85, 0x8fbe92ae, 0x0877b627, "COMpad2.cis"),
diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
index cbb451d227d2..6385d1a99b60 100644
--- a/drivers/usb/core/hcd-pci.c
+++ b/drivers/usb/core/hcd-pci.c
@@ -242,7 +242,6 @@ int usb_hcd_pci_suspend (struct pci_dev *dev, pm_message_t message)
242 case HC_STATE_SUSPENDED: 242 case HC_STATE_SUSPENDED:
243 /* no DMA or IRQs except when HC is active */ 243 /* no DMA or IRQs except when HC is active */
244 if (dev->current_state == PCI_D0) { 244 if (dev->current_state == PCI_D0) {
245 free_irq (hcd->irq, hcd);
246 pci_save_state (dev); 245 pci_save_state (dev);
247 pci_disable_device (dev); 246 pci_disable_device (dev);
248 } 247 }
@@ -374,14 +373,6 @@ int usb_hcd_pci_resume (struct pci_dev *dev)
374 373
375 hcd->state = HC_STATE_RESUMING; 374 hcd->state = HC_STATE_RESUMING;
376 hcd->saw_irq = 0; 375 hcd->saw_irq = 0;
377 retval = request_irq (dev->irq, usb_hcd_irq, SA_SHIRQ,
378 hcd->irq_descr, hcd);
379 if (retval < 0) {
380 dev_err (hcd->self.controller,
381 "can't restore IRQ after resume!\n");
382 usb_hc_died (hcd);
383 return retval;
384 }
385 376
386 retval = hcd->driver->resume (hcd); 377 retval = hcd->driver->resume (hcd);
387 if (!HC_IS_RUNNING (hcd->state)) { 378 if (!HC_IS_RUNNING (hcd->state)) {
diff --git a/drivers/usb/host/ohci-lh7a404.c b/drivers/usb/host/ohci-lh7a404.c
index 817620d73841..859aca7be753 100644
--- a/drivers/usb/host/ohci-lh7a404.c
+++ b/drivers/usb/host/ohci-lh7a404.c
@@ -17,8 +17,6 @@
17 */ 17 */
18 18
19#include <asm/hardware.h> 19#include <asm/hardware.h>
20#include <asm/mach-types.h>
21#include <asm/arch/hardware.h>
22 20
23 21
24extern int usb_disabled(void); 22extern int usb_disabled(void);
diff --git a/drivers/usb/host/ohci-omap.c b/drivers/usb/host/ohci-omap.c
index 5cde76faab93..d8f3ba7ad52e 100644
--- a/drivers/usb/host/ohci-omap.c
+++ b/drivers/usb/host/ohci-omap.c
@@ -18,7 +18,6 @@
18#include <asm/io.h> 18#include <asm/io.h>
19#include <asm/mach-types.h> 19#include <asm/mach-types.h>
20 20
21#include <asm/arch/hardware.h>
22#include <asm/arch/mux.h> 21#include <asm/arch/mux.h>
23#include <asm/arch/irqs.h> 22#include <asm/arch/irqs.h>
24#include <asm/arch/gpio.h> 23#include <asm/arch/gpio.h>
diff --git a/drivers/usb/host/ohci-s3c2410.c b/drivers/usb/host/ohci-s3c2410.c
index 3d9bcf78a9a4..da7d5478f74d 100644
--- a/drivers/usb/host/ohci-s3c2410.c
+++ b/drivers/usb/host/ohci-s3c2410.c
@@ -20,7 +20,6 @@
20*/ 20*/
21 21
22#include <asm/hardware.h> 22#include <asm/hardware.h>
23#include <asm/mach-types.h>
24#include <asm/hardware/clock.h> 23#include <asm/hardware/clock.h>
25#include <asm/arch/usb-control.h> 24#include <asm/arch/usb-control.h>
26 25
diff --git a/drivers/usb/media/vicam.c b/drivers/usb/media/vicam.c
index 4a5857c53f11..0bc0b1247a6b 100644
--- a/drivers/usb/media/vicam.c
+++ b/drivers/usb/media/vicam.c
@@ -1148,7 +1148,7 @@ vicam_write_proc_gain(struct file *file, const char *buffer,
1148static void 1148static void
1149vicam_create_proc_root(void) 1149vicam_create_proc_root(void)
1150{ 1150{
1151 vicam_proc_root = create_proc_entry("video/vicam", S_IFDIR, 0); 1151 vicam_proc_root = proc_mkdir("video/vicam", NULL);
1152 1152
1153 if (vicam_proc_root) 1153 if (vicam_proc_root)
1154 vicam_proc_root->owner = THIS_MODULE; 1154 vicam_proc_root->owner = THIS_MODULE;
@@ -1181,7 +1181,7 @@ vicam_create_proc_entry(struct vicam_camera *cam)
1181 1181
1182 sprintf(name, "video%d", cam->vdev.minor); 1182 sprintf(name, "video%d", cam->vdev.minor);
1183 1183
1184 cam->proc_dir = create_proc_entry(name, S_IFDIR, vicam_proc_root); 1184 cam->proc_dir = proc_mkdir(name, vicam_proc_root);
1185 1185
1186 if ( !cam->proc_dir ) 1186 if ( !cam->proc_dir )
1187 return; // FIXME: We should probably return an error here 1187 return; // FIXME: We should probably return an error here
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index 773ae11b4a19..1cd942abb580 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -768,6 +768,7 @@ config FB_INTEL
768 select FB_CFB_FILLRECT 768 select FB_CFB_FILLRECT
769 select FB_CFB_COPYAREA 769 select FB_CFB_COPYAREA
770 select FB_CFB_IMAGEBLIT 770 select FB_CFB_IMAGEBLIT
771 select FB_SOFT_CURSOR
771 help 772 help
772 This driver supports the on-board graphics built in to the Intel 773 This driver supports the on-board graphics built in to the Intel
773 830M/845G/852GM/855GM/865G chipsets. 774 830M/845G/852GM/855GM/865G chipsets.
diff --git a/drivers/video/aty/radeon_base.c b/drivers/video/aty/radeon_base.c
index 046b47860266..8a24a66d9ba8 100644
--- a/drivers/video/aty/radeon_base.c
+++ b/drivers/video/aty/radeon_base.c
@@ -475,7 +475,7 @@ static int __devinit radeon_probe_pll_params(struct radeonfb_info *rinfo)
475 */ 475 */
476 476
477 /* Flush PCI buffers ? */ 477 /* Flush PCI buffers ? */
478 tmp = INREG(DEVICE_ID); 478 tmp = INREG16(DEVICE_ID);
479 479
480 local_irq_disable(); 480 local_irq_disable();
481 481
diff --git a/drivers/video/aty/radeon_pm.c b/drivers/video/aty/radeon_pm.c
index 59a1b6f85067..097d668c4fe5 100644
--- a/drivers/video/aty/radeon_pm.c
+++ b/drivers/video/aty/radeon_pm.c
@@ -62,9 +62,9 @@ static void radeon_pm_disable_dynamic_mode(struct radeonfb_info *rinfo)
62 OUTPLL(pllSCLK_CNTL, tmp); 62 OUTPLL(pllSCLK_CNTL, tmp);
63 return; 63 return;
64 } 64 }
65 /* RV350 (M10) */ 65 /* RV350 (M10/M11) */
66 if (rinfo->family == CHIP_FAMILY_RV350) { 66 if (rinfo->family == CHIP_FAMILY_RV350) {
67 /* for RV350/M10, no delays are required. */ 67 /* for RV350/M10/M11, no delays are required. */
68 tmp = INPLL(pllSCLK_CNTL2); 68 tmp = INPLL(pllSCLK_CNTL2);
69 tmp |= (SCLK_CNTL2__R300_FORCE_TCL | 69 tmp |= (SCLK_CNTL2__R300_FORCE_TCL |
70 SCLK_CNTL2__R300_FORCE_GA | 70 SCLK_CNTL2__R300_FORCE_GA |
@@ -248,7 +248,7 @@ static void radeon_pm_enable_dynamic_mode(struct radeonfb_info *rinfo)
248 return; 248 return;
249 } 249 }
250 250
251 /* M10 */ 251 /* M10/M11 */
252 if (rinfo->family == CHIP_FAMILY_RV350) { 252 if (rinfo->family == CHIP_FAMILY_RV350) {
253 tmp = INPLL(pllSCLK_CNTL2); 253 tmp = INPLL(pllSCLK_CNTL2);
254 tmp &= ~(SCLK_CNTL2__R300_FORCE_TCL | 254 tmp &= ~(SCLK_CNTL2__R300_FORCE_TCL |
@@ -1155,7 +1155,7 @@ static void radeon_pm_full_reset_sdram(struct radeonfb_info *rinfo)
1155 OUTREG( CRTC_GEN_CNTL, (crtcGenCntl | CRTC_GEN_CNTL__CRTC_DISP_REQ_EN_B) ); 1155 OUTREG( CRTC_GEN_CNTL, (crtcGenCntl | CRTC_GEN_CNTL__CRTC_DISP_REQ_EN_B) );
1156 OUTREG( CRTC2_GEN_CNTL, (crtcGenCntl2 | CRTC2_GEN_CNTL__CRTC2_DISP_REQ_EN_B) ); 1156 OUTREG( CRTC2_GEN_CNTL, (crtcGenCntl2 | CRTC2_GEN_CNTL__CRTC2_DISP_REQ_EN_B) );
1157 1157
1158 /* This is the code for the Aluminium PowerBooks M10 */ 1158 /* This is the code for the Aluminium PowerBooks M10 / iBooks M11 */
1159 if (rinfo->family == CHIP_FAMILY_RV350) { 1159 if (rinfo->family == CHIP_FAMILY_RV350) {
1160 u32 sdram_mode_reg = rinfo->save_regs[35]; 1160 u32 sdram_mode_reg = rinfo->save_regs[35];
1161 static u32 default_mrtable[] = 1161 static u32 default_mrtable[] =
@@ -2741,9 +2741,11 @@ void radeonfb_pm_init(struct radeonfb_info *rinfo, int dynclk)
2741 rinfo->pm_mode |= radeon_pm_d2; 2741 rinfo->pm_mode |= radeon_pm_d2;
2742 2742
2743 /* We can restart Jasper (M10 chip in albooks), BlueStone (7500 chip 2743 /* We can restart Jasper (M10 chip in albooks), BlueStone (7500 chip
2744 * in some desktop G4s), and Via (M9+ chip on iBook G4) 2744 * in some desktop G4s), Via (M9+ chip on iBook G4) and
2745 * Snowy (M11 chip on iBook G4 manufactured after July 2005)
2745 */ 2746 */
2746 if (!strcmp(rinfo->of_node->name, "ATY,JasperParent")) { 2747 if (!strcmp(rinfo->of_node->name, "ATY,JasperParent") ||
2748 !strcmp(rinfo->of_node->name, "ATY,SnowyParent")) {
2747 rinfo->reinit_func = radeon_reinitialize_M10; 2749 rinfo->reinit_func = radeon_reinitialize_M10;
2748 rinfo->pm_mode |= radeon_pm_off; 2750 rinfo->pm_mode |= radeon_pm_off;
2749 } 2751 }
diff --git a/drivers/video/aty/radeonfb.h b/drivers/video/aty/radeonfb.h
index 659bc9f62244..01b8b2f78514 100644
--- a/drivers/video/aty/radeonfb.h
+++ b/drivers/video/aty/radeonfb.h
@@ -395,6 +395,8 @@ static inline void _radeon_msleep(struct radeonfb_info *rinfo, unsigned long ms)
395 395
396#define INREG8(addr) readb((rinfo->mmio_base)+addr) 396#define INREG8(addr) readb((rinfo->mmio_base)+addr)
397#define OUTREG8(addr,val) writeb(val, (rinfo->mmio_base)+addr) 397#define OUTREG8(addr,val) writeb(val, (rinfo->mmio_base)+addr)
398#define INREG16(addr) readw((rinfo->mmio_base)+addr)
399#define OUTREG16(addr,val) writew(val, (rinfo->mmio_base)+addr)
398#define INREG(addr) readl((rinfo->mmio_base)+addr) 400#define INREG(addr) readl((rinfo->mmio_base)+addr)
399#define OUTREG(addr,val) writel(val, (rinfo->mmio_base)+addr) 401#define OUTREG(addr,val) writel(val, (rinfo->mmio_base)+addr)
400 402
diff --git a/drivers/video/aty/xlinit.c b/drivers/video/aty/xlinit.c
index 0bea0d8d7821..a085cbf74ecb 100644
--- a/drivers/video/aty/xlinit.c
+++ b/drivers/video/aty/xlinit.c
@@ -253,9 +253,11 @@ int atyfb_xl_init(struct fb_info *info)
253 aty_st_le32(0xFC, 0x00000000, par); 253 aty_st_le32(0xFC, 0x00000000, par);
254 254
255#if defined (CONFIG_FB_ATY_GENERIC_LCD) 255#if defined (CONFIG_FB_ATY_GENERIC_LCD)
256 int i; 256 {
257 for (i=0; i<sizeof(lcd_tbl)/sizeof(lcd_tbl_t); i++) { 257 int i;
258 aty_st_lcd(lcd_tbl[i].lcd_reg, lcd_tbl[i].val, par); 258
259 for (i = 0; i < ARRAY_SIZE(lcd_tbl); i++)
260 aty_st_lcd(lcd_tbl[i].lcd_reg, lcd_tbl[i].val, par);
259 } 261 }
260#endif 262#endif
261 263
diff --git a/drivers/video/backlight/corgi_bl.c b/drivers/video/backlight/corgi_bl.c
index 630f2dfa9699..3c72c627e65e 100644
--- a/drivers/video/backlight/corgi_bl.c
+++ b/drivers/video/backlight/corgi_bl.c
@@ -19,7 +19,6 @@
19#include <linux/fb.h> 19#include <linux/fb.h>
20#include <linux/backlight.h> 20#include <linux/backlight.h>
21 21
22#include <asm/mach-types.h>
23#include <asm/arch/sharpsl.h> 22#include <asm/arch/sharpsl.h>
24 23
25#define CORGI_DEFAULT_INTENSITY 0x1f 24#define CORGI_DEFAULT_INTENSITY 0x1f
diff --git a/drivers/video/cyblafb.c b/drivers/video/cyblafb.c
index ae2762cb5608..6992100a508c 100644
--- a/drivers/video/cyblafb.c
+++ b/drivers/video/cyblafb.c
@@ -410,20 +410,21 @@ static void cyblafb_imageblit(struct fb_info *info,
410 out32(GE0C,point(image->dx+image->width-1,image->dy+image->height-1)); 410 out32(GE0C,point(image->dx+image->width-1,image->dy+image->height-1));
411 411
412 while(index < index_end) { 412 while(index < index_end) {
413 const char *p = image->data + index;
413 for(i=0;i<width_dds;i++) { 414 for(i=0;i<width_dds;i++) {
414 out32(GE9C,*((u32*) ((u32)image->data + index))); 415 out32(GE9C,*(u32*)p);
416 p+=4;
415 index+=4; 417 index+=4;
416 } 418 }
417 switch(width_dbs) { 419 switch(width_dbs) {
418 case 0: break; 420 case 0: break;
419 case 8: out32(GE9C,*((u8*)((u32)image->data+index))); 421 case 8: out32(GE9C,*(u8*)p);
420 index+=1; 422 index+=1;
421 break; 423 break;
422 case 16: out32(GE9C,*((u16*)((u32)image->data+index))); 424 case 16: out32(GE9C,*(u16*)p);
423 index+=2; 425 index+=2;
424 break; 426 break;
425 case 24: out32(GE9C,(u32)(*((u16*)((u32)image->data+index))) | 427 case 24: out32(GE9C,*(u16*)p | *(u8*)(p+2)<<16);
426 (u32)(*((u8*)((u32)image->data+index+2)))<<16);
427 index+=3; 428 index+=3;
428 break; 429 break;
429 } 430 }
diff --git a/drivers/video/i810/i810-i2c.c b/drivers/video/i810/i810-i2c.c
index fda53aac1fc1..689d2586366d 100644
--- a/drivers/video/i810/i810-i2c.c
+++ b/drivers/video/i810/i810-i2c.c
@@ -44,7 +44,7 @@ static void i810i2c_setscl(void *data, int state)
44{ 44{
45 struct i810fb_i2c_chan *chan = (struct i810fb_i2c_chan *)data; 45 struct i810fb_i2c_chan *chan = (struct i810fb_i2c_chan *)data;
46 struct i810fb_par *par = chan->par; 46 struct i810fb_par *par = chan->par;
47 u8 *mmio = par->mmio_start_virtual; 47 u8 __iomem *mmio = par->mmio_start_virtual;
48 48
49 i810_writel(mmio, GPIOB, (state ? SCL_VAL_OUT : 0) | SCL_DIR | 49 i810_writel(mmio, GPIOB, (state ? SCL_VAL_OUT : 0) | SCL_DIR |
50 SCL_DIR_MASK | SCL_VAL_MASK); 50 SCL_DIR_MASK | SCL_VAL_MASK);
@@ -55,7 +55,7 @@ static void i810i2c_setsda(void *data, int state)
55{ 55{
56 struct i810fb_i2c_chan *chan = (struct i810fb_i2c_chan *)data; 56 struct i810fb_i2c_chan *chan = (struct i810fb_i2c_chan *)data;
57 struct i810fb_par *par = chan->par; 57 struct i810fb_par *par = chan->par;
58 u8 *mmio = par->mmio_start_virtual; 58 u8 __iomem *mmio = par->mmio_start_virtual;
59 59
60 i810_writel(mmio, GPIOB, (state ? SDA_VAL_OUT : 0) | SDA_DIR | 60 i810_writel(mmio, GPIOB, (state ? SDA_VAL_OUT : 0) | SDA_DIR |
61 SDA_DIR_MASK | SDA_VAL_MASK); 61 SDA_DIR_MASK | SDA_VAL_MASK);
@@ -66,7 +66,7 @@ static int i810i2c_getscl(void *data)
66{ 66{
67 struct i810fb_i2c_chan *chan = (struct i810fb_i2c_chan *)data; 67 struct i810fb_i2c_chan *chan = (struct i810fb_i2c_chan *)data;
68 struct i810fb_par *par = chan->par; 68 struct i810fb_par *par = chan->par;
69 u8 *mmio = par->mmio_start_virtual; 69 u8 __iomem *mmio = par->mmio_start_virtual;
70 70
71 i810_writel(mmio, GPIOB, SCL_DIR_MASK); 71 i810_writel(mmio, GPIOB, SCL_DIR_MASK);
72 i810_writel(mmio, GPIOB, 0); 72 i810_writel(mmio, GPIOB, 0);
@@ -77,7 +77,7 @@ static int i810i2c_getsda(void *data)
77{ 77{
78 struct i810fb_i2c_chan *chan = (struct i810fb_i2c_chan *)data; 78 struct i810fb_i2c_chan *chan = (struct i810fb_i2c_chan *)data;
79 struct i810fb_par *par = chan->par; 79 struct i810fb_par *par = chan->par;
80 u8 *mmio = par->mmio_start_virtual; 80 u8 __iomem *mmio = par->mmio_start_virtual;
81 81
82 i810_writel(mmio, GPIOB, SDA_DIR_MASK); 82 i810_writel(mmio, GPIOB, SDA_DIR_MASK);
83 i810_writel(mmio, GPIOB, 0); 83 i810_writel(mmio, GPIOB, 0);
@@ -88,7 +88,7 @@ static void i810ddc_setscl(void *data, int state)
88{ 88{
89 struct i810fb_i2c_chan *chan = (struct i810fb_i2c_chan *)data; 89 struct i810fb_i2c_chan *chan = (struct i810fb_i2c_chan *)data;
90 struct i810fb_par *par = chan->par; 90 struct i810fb_par *par = chan->par;
91 u8 *mmio = par->mmio_start_virtual; 91 u8 __iomem *mmio = par->mmio_start_virtual;
92 92
93 i810_writel(mmio, GPIOA, (state ? SCL_VAL_OUT : 0) | SCL_DIR | 93 i810_writel(mmio, GPIOA, (state ? SCL_VAL_OUT : 0) | SCL_DIR |
94 SCL_DIR_MASK | SCL_VAL_MASK); 94 SCL_DIR_MASK | SCL_VAL_MASK);
@@ -99,7 +99,7 @@ static void i810ddc_setsda(void *data, int state)
99{ 99{
100 struct i810fb_i2c_chan *chan = (struct i810fb_i2c_chan *)data; 100 struct i810fb_i2c_chan *chan = (struct i810fb_i2c_chan *)data;
101 struct i810fb_par *par = chan->par; 101 struct i810fb_par *par = chan->par;
102 u8 *mmio = par->mmio_start_virtual; 102 u8 __iomem *mmio = par->mmio_start_virtual;
103 103
104 i810_writel(mmio, GPIOA, (state ? SDA_VAL_OUT : 0) | SDA_DIR | 104 i810_writel(mmio, GPIOA, (state ? SDA_VAL_OUT : 0) | SDA_DIR |
105 SDA_DIR_MASK | SDA_VAL_MASK); 105 SDA_DIR_MASK | SDA_VAL_MASK);
@@ -110,7 +110,7 @@ static int i810ddc_getscl(void *data)
110{ 110{
111 struct i810fb_i2c_chan *chan = (struct i810fb_i2c_chan *)data; 111 struct i810fb_i2c_chan *chan = (struct i810fb_i2c_chan *)data;
112 struct i810fb_par *par = chan->par; 112 struct i810fb_par *par = chan->par;
113 u8 *mmio = par->mmio_start_virtual; 113 u8 __iomem *mmio = par->mmio_start_virtual;
114 114
115 i810_writel(mmio, GPIOA, SCL_DIR_MASK); 115 i810_writel(mmio, GPIOA, SCL_DIR_MASK);
116 i810_writel(mmio, GPIOA, 0); 116 i810_writel(mmio, GPIOA, 0);
@@ -121,7 +121,7 @@ static int i810ddc_getsda(void *data)
121{ 121{
122 struct i810fb_i2c_chan *chan = (struct i810fb_i2c_chan *)data; 122 struct i810fb_i2c_chan *chan = (struct i810fb_i2c_chan *)data;
123 struct i810fb_par *par = chan->par; 123 struct i810fb_par *par = chan->par;
124 u8 *mmio = par->mmio_start_virtual; 124 u8 __iomem *mmio = par->mmio_start_virtual;
125 125
126 i810_writel(mmio, GPIOA, SDA_DIR_MASK); 126 i810_writel(mmio, GPIOA, SDA_DIR_MASK);
127 i810_writel(mmio, GPIOA, 0); 127 i810_writel(mmio, GPIOA, 0);
diff --git a/drivers/video/imxfb.c b/drivers/video/imxfb.c
index 6c2244cf0e74..1d54d3d6960b 100644
--- a/drivers/video/imxfb.c
+++ b/drivers/video/imxfb.c
@@ -36,7 +36,6 @@
36 36
37#include <asm/hardware.h> 37#include <asm/hardware.h>
38#include <asm/io.h> 38#include <asm/io.h>
39#include <asm/mach-types.h>
40#include <asm/uaccess.h> 39#include <asm/uaccess.h>
41#include <asm/arch/imxfb.h> 40#include <asm/arch/imxfb.h>
42 41
diff --git a/drivers/video/intelfb/intelfbdrv.c b/drivers/video/intelfb/intelfbdrv.c
index bf62e6ed0382..80a09344f1aa 100644
--- a/drivers/video/intelfb/intelfbdrv.c
+++ b/drivers/video/intelfb/intelfbdrv.c
@@ -226,7 +226,7 @@ MODULE_DEVICE_TABLE(pci, intelfb_pci_table);
226 226
227static int accel = 1; 227static int accel = 1;
228static int vram = 4; 228static int vram = 4;
229static int hwcursor = 1; 229static int hwcursor = 0;
230static int mtrr = 1; 230static int mtrr = 1;
231static int fixed = 0; 231static int fixed = 0;
232static int noinit = 0; 232static int noinit = 0;
@@ -609,15 +609,9 @@ intelfb_pci_register(struct pci_dev *pdev, const struct pci_device_id *ent)
609 dinfo->accel = 0; 609 dinfo->accel = 0;
610 } 610 }
611 611
612 if (MB(voffset) < stolen_size)
613 offset = (stolen_size >> 12);
614 else
615 offset = ROUND_UP_TO_PAGE(MB(voffset))/GTT_PAGE_SIZE;
616
617 /* Framebuffer parameters - Use all the stolen memory if >= vram */ 612 /* Framebuffer parameters - Use all the stolen memory if >= vram */
618 if (ROUND_UP_TO_PAGE(stolen_size) >= ((offset << 12) + MB(vram))) { 613 if (ROUND_UP_TO_PAGE(stolen_size) >= MB(vram)) {
619 dinfo->fb.size = ROUND_UP_TO_PAGE(stolen_size); 614 dinfo->fb.size = ROUND_UP_TO_PAGE(stolen_size);
620 dinfo->fb.offset = 0;
621 dinfo->fbmem_gart = 0; 615 dinfo->fbmem_gart = 0;
622 } else { 616 } else {
623 dinfo->fb.size = MB(vram); 617 dinfo->fb.size = MB(vram);
@@ -648,6 +642,11 @@ intelfb_pci_register(struct pci_dev *pdev, const struct pci_device_id *ent)
648 return -ENODEV; 642 return -ENODEV;
649 } 643 }
650 644
645 if (MB(voffset) < stolen_size)
646 offset = (stolen_size >> 12);
647 else
648 offset = ROUND_UP_TO_PAGE(MB(voffset))/GTT_PAGE_SIZE;
649
651 /* set the mem offsets - set them after the already used pages */ 650 /* set the mem offsets - set them after the already used pages */
652 if (dinfo->accel) { 651 if (dinfo->accel) {
653 dinfo->ring.offset = offset + gtt_info.current_memory; 652 dinfo->ring.offset = offset + gtt_info.current_memory;
@@ -662,10 +661,11 @@ intelfb_pci_register(struct pci_dev *pdev, const struct pci_device_id *ent)
662 + (dinfo->cursor.size >> 12); 661 + (dinfo->cursor.size >> 12);
663 } 662 }
664 663
664 /* Allocate memories (which aren't stolen) */
665 /* Map the fb and MMIO regions */ 665 /* Map the fb and MMIO regions */
666 /* ioremap only up to the end of used aperture */ 666 /* ioremap only up to the end of used aperture */
667 dinfo->aperture.virtual = (u8 __iomem *)ioremap_nocache 667 dinfo->aperture.virtual = (u8 __iomem *)ioremap_nocache
668 (dinfo->aperture.physical, (dinfo->fb.offset << 12) 668 (dinfo->aperture.physical, ((offset + dinfo->fb.offset) << 12)
669 + dinfo->fb.size); 669 + dinfo->fb.size);
670 if (!dinfo->aperture.virtual) { 670 if (!dinfo->aperture.virtual) {
671 ERR_MSG("Cannot remap FB region.\n"); 671 ERR_MSG("Cannot remap FB region.\n");
@@ -682,7 +682,6 @@ intelfb_pci_register(struct pci_dev *pdev, const struct pci_device_id *ent)
682 return -ENODEV; 682 return -ENODEV;
683 } 683 }
684 684
685 /* Allocate memories (which aren't stolen) */
686 if (dinfo->accel) { 685 if (dinfo->accel) {
687 if (!(dinfo->gtt_ring_mem = 686 if (!(dinfo->gtt_ring_mem =
688 agp_allocate_memory(bridge, dinfo->ring.size >> 12, 687 agp_allocate_memory(bridge, dinfo->ring.size >> 12,
@@ -1484,7 +1483,7 @@ intelfb_cursor(struct fb_info *info, struct fb_cursor *cursor)
1484#endif 1483#endif
1485 1484
1486 if (!dinfo->hwcursor) 1485 if (!dinfo->hwcursor)
1487 return -ENXIO; 1486 return soft_cursor(info, cursor);
1488 1487
1489 intelfbhw_cursor_hide(dinfo); 1488 intelfbhw_cursor_hide(dinfo);
1490 1489
diff --git a/drivers/video/pxafb.c b/drivers/video/pxafb.c
index 34d4dcc0320a..194eed0a238c 100644
--- a/drivers/video/pxafb.c
+++ b/drivers/video/pxafb.c
@@ -260,9 +260,9 @@ static int pxafb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
260 } 260 }
261 261
262#ifdef CONFIG_CPU_FREQ 262#ifdef CONFIG_CPU_FREQ
263 DPRINTK("dma period = %d ps, clock = %d kHz\n", 263 pr_debug("pxafb: dma period = %d ps, clock = %d kHz\n",
264 pxafb_display_dma_period(var), 264 pxafb_display_dma_period(var),
265 get_clk_frequency_khz(0)); 265 get_clk_frequency_khz(0));
266#endif 266#endif
267 267
268 return 0; 268 return 0;
@@ -270,7 +270,7 @@ static int pxafb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
270 270
271static inline void pxafb_set_truecolor(u_int is_true_color) 271static inline void pxafb_set_truecolor(u_int is_true_color)
272{ 272{
273 DPRINTK("true_color = %d\n", is_true_color); 273 pr_debug("pxafb: true_color = %d\n", is_true_color);
274 // do your machine-specific setup if needed 274 // do your machine-specific setup if needed
275} 275}
276 276
@@ -284,7 +284,7 @@ static int pxafb_set_par(struct fb_info *info)
284 struct fb_var_screeninfo *var = &info->var; 284 struct fb_var_screeninfo *var = &info->var;
285 unsigned long palette_mem_size; 285 unsigned long palette_mem_size;
286 286
287 DPRINTK("set_par\n"); 287 pr_debug("pxafb: set_par\n");
288 288
289 if (var->bits_per_pixel == 16) 289 if (var->bits_per_pixel == 16)
290 fbi->fb.fix.visual = FB_VISUAL_TRUECOLOR; 290 fbi->fb.fix.visual = FB_VISUAL_TRUECOLOR;
@@ -308,7 +308,7 @@ static int pxafb_set_par(struct fb_info *info)
308 308
309 palette_mem_size = fbi->palette_size * sizeof(u16); 309 palette_mem_size = fbi->palette_size * sizeof(u16);
310 310
311 DPRINTK("palette_mem_size = 0x%08lx\n", (u_long) palette_mem_size); 311 pr_debug("pxafb: palette_mem_size = 0x%08lx\n", palette_mem_size);
312 312
313 fbi->palette_cpu = (u16 *)(fbi->map_cpu + PAGE_SIZE - palette_mem_size); 313 fbi->palette_cpu = (u16 *)(fbi->map_cpu + PAGE_SIZE - palette_mem_size);
314 fbi->palette_dma = fbi->map_dma + PAGE_SIZE - palette_mem_size; 314 fbi->palette_dma = fbi->map_dma + PAGE_SIZE - palette_mem_size;
@@ -369,7 +369,7 @@ static int pxafb_blank(int blank, struct fb_info *info)
369 struct pxafb_info *fbi = (struct pxafb_info *)info; 369 struct pxafb_info *fbi = (struct pxafb_info *)info;
370 int i; 370 int i;
371 371
372 DPRINTK("pxafb_blank: blank=%d\n", blank); 372 pr_debug("pxafb: blank=%d\n", blank);
373 373
374 switch (blank) { 374 switch (blank) {
375 case FB_BLANK_POWERDOWN: 375 case FB_BLANK_POWERDOWN:
@@ -508,15 +508,15 @@ static int pxafb_activate_var(struct fb_var_screeninfo *var, struct pxafb_info *
508 u_long flags; 508 u_long flags;
509 u_int lines_per_panel, pcd = get_pcd(var->pixclock); 509 u_int lines_per_panel, pcd = get_pcd(var->pixclock);
510 510
511 DPRINTK("Configuring PXA LCD\n"); 511 pr_debug("pxafb: Configuring PXA LCD\n");
512 512
513 DPRINTK("var: xres=%d hslen=%d lm=%d rm=%d\n", 513 pr_debug("var: xres=%d hslen=%d lm=%d rm=%d\n",
514 var->xres, var->hsync_len, 514 var->xres, var->hsync_len,
515 var->left_margin, var->right_margin); 515 var->left_margin, var->right_margin);
516 DPRINTK("var: yres=%d vslen=%d um=%d bm=%d\n", 516 pr_debug("var: yres=%d vslen=%d um=%d bm=%d\n",
517 var->yres, var->vsync_len, 517 var->yres, var->vsync_len,
518 var->upper_margin, var->lower_margin); 518 var->upper_margin, var->lower_margin);
519 DPRINTK("var: pixclock=%d pcd=%d\n", var->pixclock, pcd); 519 pr_debug("var: pixclock=%d pcd=%d\n", var->pixclock, pcd);
520 520
521#if DEBUG_VAR 521#if DEBUG_VAR
522 if (var->xres < 16 || var->xres > 1024) 522 if (var->xres < 16 || var->xres > 1024)
@@ -589,10 +589,10 @@ static int pxafb_activate_var(struct fb_var_screeninfo *var, struct pxafb_info *
589 if (pcd) 589 if (pcd)
590 new_regs.lccr3 |= LCCR3_PixClkDiv(pcd); 590 new_regs.lccr3 |= LCCR3_PixClkDiv(pcd);
591 591
592 DPRINTK("nlccr0 = 0x%08x\n", new_regs.lccr0); 592 pr_debug("nlccr0 = 0x%08x\n", new_regs.lccr0);
593 DPRINTK("nlccr1 = 0x%08x\n", new_regs.lccr1); 593 pr_debug("nlccr1 = 0x%08x\n", new_regs.lccr1);
594 DPRINTK("nlccr2 = 0x%08x\n", new_regs.lccr2); 594 pr_debug("nlccr2 = 0x%08x\n", new_regs.lccr2);
595 DPRINTK("nlccr3 = 0x%08x\n", new_regs.lccr3); 595 pr_debug("nlccr3 = 0x%08x\n", new_regs.lccr3);
596 596
597 /* Update shadow copy atomically */ 597 /* Update shadow copy atomically */
598 local_irq_save(flags); 598 local_irq_save(flags);
@@ -637,24 +637,24 @@ static int pxafb_activate_var(struct fb_var_screeninfo *var, struct pxafb_info *
637 } 637 }
638 638
639#if 0 639#if 0
640 DPRINTK("fbi->dmadesc_fblow_cpu = 0x%p\n", fbi->dmadesc_fblow_cpu); 640 pr_debug("fbi->dmadesc_fblow_cpu = 0x%p\n", fbi->dmadesc_fblow_cpu);
641 DPRINTK("fbi->dmadesc_fbhigh_cpu = 0x%p\n", fbi->dmadesc_fbhigh_cpu); 641 pr_debug("fbi->dmadesc_fbhigh_cpu = 0x%p\n", fbi->dmadesc_fbhigh_cpu);
642 DPRINTK("fbi->dmadesc_palette_cpu = 0x%p\n", fbi->dmadesc_palette_cpu); 642 pr_debug("fbi->dmadesc_palette_cpu = 0x%p\n", fbi->dmadesc_palette_cpu);
643 DPRINTK("fbi->dmadesc_fblow_dma = 0x%x\n", fbi->dmadesc_fblow_dma); 643 pr_debug("fbi->dmadesc_fblow_dma = 0x%x\n", fbi->dmadesc_fblow_dma);
644 DPRINTK("fbi->dmadesc_fbhigh_dma = 0x%x\n", fbi->dmadesc_fbhigh_dma); 644 pr_debug("fbi->dmadesc_fbhigh_dma = 0x%x\n", fbi->dmadesc_fbhigh_dma);
645 DPRINTK("fbi->dmadesc_palette_dma = 0x%x\n", fbi->dmadesc_palette_dma); 645 pr_debug("fbi->dmadesc_palette_dma = 0x%x\n", fbi->dmadesc_palette_dma);
646 646
647 DPRINTK("fbi->dmadesc_fblow_cpu->fdadr = 0x%x\n", fbi->dmadesc_fblow_cpu->fdadr); 647 pr_debug("fbi->dmadesc_fblow_cpu->fdadr = 0x%x\n", fbi->dmadesc_fblow_cpu->fdadr);
648 DPRINTK("fbi->dmadesc_fbhigh_cpu->fdadr = 0x%x\n", fbi->dmadesc_fbhigh_cpu->fdadr); 648 pr_debug("fbi->dmadesc_fbhigh_cpu->fdadr = 0x%x\n", fbi->dmadesc_fbhigh_cpu->fdadr);
649 DPRINTK("fbi->dmadesc_palette_cpu->fdadr = 0x%x\n", fbi->dmadesc_palette_cpu->fdadr); 649 pr_debug("fbi->dmadesc_palette_cpu->fdadr = 0x%x\n", fbi->dmadesc_palette_cpu->fdadr);
650 650
651 DPRINTK("fbi->dmadesc_fblow_cpu->fsadr = 0x%x\n", fbi->dmadesc_fblow_cpu->fsadr); 651 pr_debug("fbi->dmadesc_fblow_cpu->fsadr = 0x%x\n", fbi->dmadesc_fblow_cpu->fsadr);
652 DPRINTK("fbi->dmadesc_fbhigh_cpu->fsadr = 0x%x\n", fbi->dmadesc_fbhigh_cpu->fsadr); 652 pr_debug("fbi->dmadesc_fbhigh_cpu->fsadr = 0x%x\n", fbi->dmadesc_fbhigh_cpu->fsadr);
653 DPRINTK("fbi->dmadesc_palette_cpu->fsadr = 0x%x\n", fbi->dmadesc_palette_cpu->fsadr); 653 pr_debug("fbi->dmadesc_palette_cpu->fsadr = 0x%x\n", fbi->dmadesc_palette_cpu->fsadr);
654 654
655 DPRINTK("fbi->dmadesc_fblow_cpu->ldcmd = 0x%x\n", fbi->dmadesc_fblow_cpu->ldcmd); 655 pr_debug("fbi->dmadesc_fblow_cpu->ldcmd = 0x%x\n", fbi->dmadesc_fblow_cpu->ldcmd);
656 DPRINTK("fbi->dmadesc_fbhigh_cpu->ldcmd = 0x%x\n", fbi->dmadesc_fbhigh_cpu->ldcmd); 656 pr_debug("fbi->dmadesc_fbhigh_cpu->ldcmd = 0x%x\n", fbi->dmadesc_fbhigh_cpu->ldcmd);
657 DPRINTK("fbi->dmadesc_palette_cpu->ldcmd = 0x%x\n", fbi->dmadesc_palette_cpu->ldcmd); 657 pr_debug("fbi->dmadesc_palette_cpu->ldcmd = 0x%x\n", fbi->dmadesc_palette_cpu->ldcmd);
658#endif 658#endif
659 659
660 fbi->reg_lccr0 = new_regs.lccr0; 660 fbi->reg_lccr0 = new_regs.lccr0;
@@ -684,7 +684,7 @@ static int pxafb_activate_var(struct fb_var_screeninfo *var, struct pxafb_info *
684 */ 684 */
685static inline void __pxafb_backlight_power(struct pxafb_info *fbi, int on) 685static inline void __pxafb_backlight_power(struct pxafb_info *fbi, int on)
686{ 686{
687 DPRINTK("backlight o%s\n", on ? "n" : "ff"); 687 pr_debug("pxafb: backlight o%s\n", on ? "n" : "ff");
688 688
689 if (pxafb_backlight_power) 689 if (pxafb_backlight_power)
690 pxafb_backlight_power(on); 690 pxafb_backlight_power(on);
@@ -692,7 +692,7 @@ static inline void __pxafb_backlight_power(struct pxafb_info *fbi, int on)
692 692
693static inline void __pxafb_lcd_power(struct pxafb_info *fbi, int on) 693static inline void __pxafb_lcd_power(struct pxafb_info *fbi, int on)
694{ 694{
695 DPRINTK("LCD power o%s\n", on ? "n" : "ff"); 695 pr_debug("pxafb: LCD power o%s\n", on ? "n" : "ff");
696 696
697 if (pxafb_lcd_power) 697 if (pxafb_lcd_power)
698 pxafb_lcd_power(on); 698 pxafb_lcd_power(on);
@@ -740,13 +740,13 @@ static void pxafb_setup_gpio(struct pxafb_info *fbi)
740 740
741static void pxafb_enable_controller(struct pxafb_info *fbi) 741static void pxafb_enable_controller(struct pxafb_info *fbi)
742{ 742{
743 DPRINTK("Enabling LCD controller\n"); 743 pr_debug("pxafb: Enabling LCD controller\n");
744 DPRINTK("fdadr0 0x%08x\n", (unsigned int) fbi->fdadr0); 744 pr_debug("fdadr0 0x%08x\n", (unsigned int) fbi->fdadr0);
745 DPRINTK("fdadr1 0x%08x\n", (unsigned int) fbi->fdadr1); 745 pr_debug("fdadr1 0x%08x\n", (unsigned int) fbi->fdadr1);
746 DPRINTK("reg_lccr0 0x%08x\n", (unsigned int) fbi->reg_lccr0); 746 pr_debug("reg_lccr0 0x%08x\n", (unsigned int) fbi->reg_lccr0);
747 DPRINTK("reg_lccr1 0x%08x\n", (unsigned int) fbi->reg_lccr1); 747 pr_debug("reg_lccr1 0x%08x\n", (unsigned int) fbi->reg_lccr1);
748 DPRINTK("reg_lccr2 0x%08x\n", (unsigned int) fbi->reg_lccr2); 748 pr_debug("reg_lccr2 0x%08x\n", (unsigned int) fbi->reg_lccr2);
749 DPRINTK("reg_lccr3 0x%08x\n", (unsigned int) fbi->reg_lccr3); 749 pr_debug("reg_lccr3 0x%08x\n", (unsigned int) fbi->reg_lccr3);
750 750
751 /* enable LCD controller clock */ 751 /* enable LCD controller clock */
752 pxa_set_cken(CKEN16_LCD, 1); 752 pxa_set_cken(CKEN16_LCD, 1);
@@ -761,19 +761,19 @@ static void pxafb_enable_controller(struct pxafb_info *fbi)
761 FDADR1 = fbi->fdadr1; 761 FDADR1 = fbi->fdadr1;
762 LCCR0 |= LCCR0_ENB; 762 LCCR0 |= LCCR0_ENB;
763 763
764 DPRINTK("FDADR0 0x%08x\n", (unsigned int) FDADR0); 764 pr_debug("FDADR0 0x%08x\n", (unsigned int) FDADR0);
765 DPRINTK("FDADR1 0x%08x\n", (unsigned int) FDADR1); 765 pr_debug("FDADR1 0x%08x\n", (unsigned int) FDADR1);
766 DPRINTK("LCCR0 0x%08x\n", (unsigned int) LCCR0); 766 pr_debug("LCCR0 0x%08x\n", (unsigned int) LCCR0);
767 DPRINTK("LCCR1 0x%08x\n", (unsigned int) LCCR1); 767 pr_debug("LCCR1 0x%08x\n", (unsigned int) LCCR1);
768 DPRINTK("LCCR2 0x%08x\n", (unsigned int) LCCR2); 768 pr_debug("LCCR2 0x%08x\n", (unsigned int) LCCR2);
769 DPRINTK("LCCR3 0x%08x\n", (unsigned int) LCCR3); 769 pr_debug("LCCR3 0x%08x\n", (unsigned int) LCCR3);
770} 770}
771 771
772static void pxafb_disable_controller(struct pxafb_info *fbi) 772static void pxafb_disable_controller(struct pxafb_info *fbi)
773{ 773{
774 DECLARE_WAITQUEUE(wait, current); 774 DECLARE_WAITQUEUE(wait, current);
775 775
776 DPRINTK("Disabling LCD controller\n"); 776 pr_debug("pxafb: disabling LCD controller\n");
777 777
778 set_current_state(TASK_UNINTERRUPTIBLE); 778 set_current_state(TASK_UNINTERRUPTIBLE);
779 add_wait_queue(&fbi->ctrlr_wait, &wait); 779 add_wait_queue(&fbi->ctrlr_wait, &wait);
@@ -1039,7 +1039,7 @@ static int __init pxafb_map_video_memory(struct pxafb_info *fbi)
1039 fbi->palette_size = fbi->fb.var.bits_per_pixel == 8 ? 256 : 16; 1039 fbi->palette_size = fbi->fb.var.bits_per_pixel == 8 ? 256 : 16;
1040 1040
1041 palette_mem_size = fbi->palette_size * sizeof(u16); 1041 palette_mem_size = fbi->palette_size * sizeof(u16);
1042 DPRINTK("palette_mem_size = 0x%08lx\n", (u_long) palette_mem_size); 1042 pr_debug("pxafb: palette_mem_size = 0x%08lx\n", palette_mem_size);
1043 1043
1044 fbi->palette_cpu = (u16 *)(fbi->map_cpu + PAGE_SIZE - palette_mem_size); 1044 fbi->palette_cpu = (u16 *)(fbi->map_cpu + PAGE_SIZE - palette_mem_size);
1045 fbi->palette_dma = fbi->map_dma + PAGE_SIZE - palette_mem_size; 1045 fbi->palette_dma = fbi->map_dma + PAGE_SIZE - palette_mem_size;
diff --git a/drivers/video/pxafb.h b/drivers/video/pxafb.h
index 22c00be786a8..47f41f70db7a 100644
--- a/drivers/video/pxafb.h
+++ b/drivers/video/pxafb.h
@@ -114,15 +114,6 @@ struct pxafb_info {
114#define PXA_NAME "PXA" 114#define PXA_NAME "PXA"
115 115
116/* 116/*
117 * Debug macros
118 */
119#if DEBUG
120# define DPRINTK(fmt, args...) printk("%s: " fmt, __FUNCTION__ , ## args)
121#else
122# define DPRINTK(fmt, args...)
123#endif
124
125/*
126 * Minimum X and Y resolutions 117 * Minimum X and Y resolutions
127 */ 118 */
128#define MIN_XRES 64 119#define MIN_XRES 64
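The pxafb hunks above replace the driver-local DPRINTK() macro with the generic pr_debug(), and the pxafb.h hunk removes the macro definition entirely. A small, hedged sketch of that idiom is below; the driver name is hypothetical, and it assumes the usual convention of defining DEBUG per-file before the kernel headers so that pr_debug() is compiled in.

/* Sketch of the pr_debug() idiom the pxafb conversion uses; "examplefb" is hypothetical. */
#define DEBUG			/* define before kernel.h so pr_debug() is not compiled out */
#include <linux/kernel.h>
#include <linux/module.h>

static int __init examplefb_init(void)
{
	unsigned long palette_mem_size = 256 * sizeof(unsigned short);

	/* One shared macro instead of a per-driver DPRINTK(); no-op without DEBUG. */
	pr_debug("examplefb: palette_mem_size = 0x%08lx\n", palette_mem_size);
	return 0;
}
module_init(examplefb_init);
MODULE_LICENSE("GPL");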
diff --git a/drivers/video/s3c2410fb.c b/drivers/video/s3c2410fb.c
index 00c0223a352e..5ab79afb53b7 100644
--- a/drivers/video/s3c2410fb.c
+++ b/drivers/video/s3c2410fb.c
@@ -228,8 +228,8 @@ static int s3c2410fb_check_var(struct fb_var_screeninfo *var,
228 * information 228 * information
229*/ 229*/
230 230
231static int s3c2410fb_activate_var(struct s3c2410fb_info *fbi, 231static void s3c2410fb_activate_var(struct s3c2410fb_info *fbi,
232 struct fb_var_screeninfo *var) 232 struct fb_var_screeninfo *var)
233{ 233{
234 fbi->regs.lcdcon1 &= ~S3C2410_LCDCON1_MODEMASK; 234 fbi->regs.lcdcon1 &= ~S3C2410_LCDCON1_MODEMASK;
235 235
diff --git a/fs/9p/conv.c b/fs/9p/conv.c
index 1554731bd653..18121af99d3e 100644
--- a/fs/9p/conv.c
+++ b/fs/9p/conv.c
@@ -3,6 +3,7 @@
3 * 3 *
4 * 9P protocol conversion functions 4 * 9P protocol conversion functions
5 * 5 *
6 * Copyright (C) 2004, 2005 by Latchesar Ionkov <lucho@ionkov.net>
6 * Copyright (C) 2004 by Eric Van Hensbergen <ericvh@gmail.com> 7 * Copyright (C) 2004 by Eric Van Hensbergen <ericvh@gmail.com>
7 * Copyright (C) 2002 by Ron Minnich <rminnich@lanl.gov> 8 * Copyright (C) 2002 by Ron Minnich <rminnich@lanl.gov>
8 * 9 *
@@ -55,66 +56,70 @@ static inline int buf_check_overflow(struct cbuf *buf)
55 return buf->p > buf->ep; 56 return buf->p > buf->ep;
56} 57}
57 58
58static inline void buf_check_size(struct cbuf *buf, int len) 59static inline int buf_check_size(struct cbuf *buf, int len)
59{ 60{
60 if (buf->p+len > buf->ep) { 61 if (buf->p+len > buf->ep) {
61 if (buf->p < buf->ep) { 62 if (buf->p < buf->ep) {
62 eprintk(KERN_ERR, "buffer overflow\n"); 63 eprintk(KERN_ERR, "buffer overflow\n");
63 buf->p = buf->ep + 1; 64 buf->p = buf->ep + 1;
65 return 0;
64 } 66 }
65 } 67 }
68
69 return 1;
66} 70}
67 71
68static inline void *buf_alloc(struct cbuf *buf, int len) 72static inline void *buf_alloc(struct cbuf *buf, int len)
69{ 73{
70 void *ret = NULL; 74 void *ret = NULL;
71 75
72 buf_check_size(buf, len); 76 if (buf_check_size(buf, len)) {
73 ret = buf->p; 77 ret = buf->p;
74 buf->p += len; 78 buf->p += len;
79 }
75 80
76 return ret; 81 return ret;
77} 82}
78 83
79static inline void buf_put_int8(struct cbuf *buf, u8 val) 84static inline void buf_put_int8(struct cbuf *buf, u8 val)
80{ 85{
81 buf_check_size(buf, 1); 86 if (buf_check_size(buf, 1)) {
82 87 buf->p[0] = val;
83 buf->p[0] = val; 88 buf->p++;
84 buf->p++; 89 }
85} 90}
86 91
87static inline void buf_put_int16(struct cbuf *buf, u16 val) 92static inline void buf_put_int16(struct cbuf *buf, u16 val)
88{ 93{
89 buf_check_size(buf, 2); 94 if (buf_check_size(buf, 2)) {
90 95 *(__le16 *) buf->p = cpu_to_le16(val);
91 *(__le16 *) buf->p = cpu_to_le16(val); 96 buf->p += 2;
92 buf->p += 2; 97 }
93} 98}
94 99
95static inline void buf_put_int32(struct cbuf *buf, u32 val) 100static inline void buf_put_int32(struct cbuf *buf, u32 val)
96{ 101{
97 buf_check_size(buf, 4); 102 if (buf_check_size(buf, 4)) {
98 103 *(__le32 *)buf->p = cpu_to_le32(val);
99 *(__le32 *)buf->p = cpu_to_le32(val); 104 buf->p += 4;
100 buf->p += 4; 105 }
101} 106}
102 107
103static inline void buf_put_int64(struct cbuf *buf, u64 val) 108static inline void buf_put_int64(struct cbuf *buf, u64 val)
104{ 109{
105 buf_check_size(buf, 8); 110 if (buf_check_size(buf, 8)) {
106 111 *(__le64 *)buf->p = cpu_to_le64(val);
107 *(__le64 *)buf->p = cpu_to_le64(val); 112 buf->p += 8;
108 buf->p += 8; 113 }
109} 114}
110 115
111static inline void buf_put_stringn(struct cbuf *buf, const char *s, u16 slen) 116static inline void buf_put_stringn(struct cbuf *buf, const char *s, u16 slen)
112{ 117{
113 buf_check_size(buf, slen + 2); 118 if (buf_check_size(buf, slen + 2)) {
114 119 buf_put_int16(buf, slen);
115 buf_put_int16(buf, slen); 120 memcpy(buf->p, s, slen);
116 memcpy(buf->p, s, slen); 121 buf->p += slen;
117 buf->p += slen; 122 }
118} 123}
119 124
120static inline void buf_put_string(struct cbuf *buf, const char *s) 125static inline void buf_put_string(struct cbuf *buf, const char *s)
@@ -124,20 +129,20 @@ static inline void buf_put_string(struct cbuf *buf, const char *s)
124 129
125static inline void buf_put_data(struct cbuf *buf, void *data, u32 datalen) 130static inline void buf_put_data(struct cbuf *buf, void *data, u32 datalen)
126{ 131{
127 buf_check_size(buf, datalen); 132 if (buf_check_size(buf, datalen)) {
128 133 memcpy(buf->p, data, datalen);
129 memcpy(buf->p, data, datalen); 134 buf->p += datalen;
130 buf->p += datalen; 135 }
131} 136}
132 137
133static inline u8 buf_get_int8(struct cbuf *buf) 138static inline u8 buf_get_int8(struct cbuf *buf)
134{ 139{
135 u8 ret = 0; 140 u8 ret = 0;
136 141
137 buf_check_size(buf, 1); 142 if (buf_check_size(buf, 1)) {
138 ret = buf->p[0]; 143 ret = buf->p[0];
139 144 buf->p++;
140 buf->p++; 145 }
141 146
142 return ret; 147 return ret;
143} 148}
@@ -146,10 +151,10 @@ static inline u16 buf_get_int16(struct cbuf *buf)
146{ 151{
147 u16 ret = 0; 152 u16 ret = 0;
148 153
149 buf_check_size(buf, 2); 154 if (buf_check_size(buf, 2)) {
150 ret = le16_to_cpu(*(__le16 *)buf->p); 155 ret = le16_to_cpu(*(__le16 *)buf->p);
151 156 buf->p += 2;
152 buf->p += 2; 157 }
153 158
154 return ret; 159 return ret;
155} 160}
@@ -158,10 +163,10 @@ static inline u32 buf_get_int32(struct cbuf *buf)
158{ 163{
159 u32 ret = 0; 164 u32 ret = 0;
160 165
161 buf_check_size(buf, 4); 166 if (buf_check_size(buf, 4)) {
162 ret = le32_to_cpu(*(__le32 *)buf->p); 167 ret = le32_to_cpu(*(__le32 *)buf->p);
163 168 buf->p += 4;
164 buf->p += 4; 169 }
165 170
166 return ret; 171 return ret;
167} 172}
@@ -170,10 +175,10 @@ static inline u64 buf_get_int64(struct cbuf *buf)
170{ 175{
171 u64 ret = 0; 176 u64 ret = 0;
172 177
173 buf_check_size(buf, 8); 178 if (buf_check_size(buf, 8)) {
174 ret = le64_to_cpu(*(__le64 *)buf->p); 179 ret = le64_to_cpu(*(__le64 *)buf->p);
175 180 buf->p += 8;
176 buf->p += 8; 181 }
177 182
178 return ret; 183 return ret;
179} 184}
@@ -181,27 +186,35 @@ static inline u64 buf_get_int64(struct cbuf *buf)
181static inline int 186static inline int
182buf_get_string(struct cbuf *buf, char *data, unsigned int datalen) 187buf_get_string(struct cbuf *buf, char *data, unsigned int datalen)
183{ 188{
189 u16 len = 0;
190
191 len = buf_get_int16(buf);
192 if (!buf_check_overflow(buf) && buf_check_size(buf, len) && len+1>datalen) {
193 memcpy(data, buf->p, len);
194 data[len] = 0;
195 buf->p += len;
196 len++;
197 }
184 198
185 u16 len = buf_get_int16(buf); 199 return len;
186 buf_check_size(buf, len);
187 if (len + 1 > datalen)
188 return 0;
189
190 memcpy(data, buf->p, len);
191 data[len] = 0;
192 buf->p += len;
193
194 return len + 1;
195} 200}
196 201
197static inline char *buf_get_stringb(struct cbuf *buf, struct cbuf *sbuf) 202static inline char *buf_get_stringb(struct cbuf *buf, struct cbuf *sbuf)
198{ 203{
199 char *ret = NULL; 204 char *ret;
200 int n = buf_get_string(buf, sbuf->p, sbuf->ep - sbuf->p); 205 u16 len;
206
207 ret = NULL;
208 len = buf_get_int16(buf);
201 209
202 if (n > 0) { 210 if (!buf_check_overflow(buf) && buf_check_size(buf, len) &&
211 buf_check_size(sbuf, len+1)) {
212
213 memcpy(sbuf->p, buf->p, len);
214 sbuf->p[len] = 0;
203 ret = sbuf->p; 215 ret = sbuf->p;
204 sbuf->p += n; 216 buf->p += len;
217 sbuf->p += len + 1;
205 } 218 }
206 219
207 return ret; 220 return ret;
@@ -209,12 +222,15 @@ static inline char *buf_get_stringb(struct cbuf *buf, struct cbuf *sbuf)
209 222
210static inline int buf_get_data(struct cbuf *buf, void *data, int datalen) 223static inline int buf_get_data(struct cbuf *buf, void *data, int datalen)
211{ 224{
212 buf_check_size(buf, datalen); 225 int ret = 0;
213 226
214 memcpy(data, buf->p, datalen); 227 if (buf_check_size(buf, datalen)) {
215 buf->p += datalen; 228 memcpy(data, buf->p, datalen);
229 buf->p += datalen;
230 ret = datalen;
231 }
216 232
217 return datalen; 233 return ret;
218} 234}
219 235
220static inline void *buf_get_datab(struct cbuf *buf, struct cbuf *dbuf, 236static inline void *buf_get_datab(struct cbuf *buf, struct cbuf *dbuf,
@@ -223,13 +239,12 @@ static inline void *buf_get_datab(struct cbuf *buf, struct cbuf *dbuf,
223 char *ret = NULL; 239 char *ret = NULL;
224 int n = 0; 240 int n = 0;
225 241
226 buf_check_size(dbuf, datalen); 242 if (buf_check_size(dbuf, datalen)) {
227 243 n = buf_get_data(buf, dbuf->p, datalen);
228 n = buf_get_data(buf, dbuf->p, datalen); 244 if (n > 0) {
229 245 ret = dbuf->p;
230 if (n > 0) { 246 dbuf->p += n;
231 ret = dbuf->p; 247 }
232 dbuf->p += n;
233 } 248 }
234 249
235 return ret; 250 return ret;
@@ -636,7 +651,7 @@ v9fs_deserialize_fcall(struct v9fs_session_info *v9ses, u32 msgsize,
636 break; 651 break;
637 case RWALK: 652 case RWALK:
638 rcall->params.rwalk.nwqid = buf_get_int16(bufp); 653 rcall->params.rwalk.nwqid = buf_get_int16(bufp);
639 rcall->params.rwalk.wqids = buf_alloc(bufp, 654 rcall->params.rwalk.wqids = buf_alloc(dbufp,
640 rcall->params.rwalk.nwqid * sizeof(struct v9fs_qid)); 655 rcall->params.rwalk.nwqid * sizeof(struct v9fs_qid));
641 if (rcall->params.rwalk.wqids) 656 if (rcall->params.rwalk.wqids)
642 for (i = 0; i < rcall->params.rwalk.nwqid; i++) { 657 for (i = 0; i < rcall->params.rwalk.nwqid; i++) {
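The conv.c hunk above turns buf_check_size() into a predicate and makes every put/get accessor touch the buffer only when the check succeeds, instead of writing past the end. A compact, hedged userspace sketch of that pattern follows; the struct layout, sizes, and main() driver are simplified stand-ins, not the kernel code (the kernel version also does cpu_to_le32() conversion).

/*
 * Userspace sketch of the bounds-checked buffer accessor pattern from
 * fs/9p/conv.c above; names and sizes are illustrative only.
 */
#include <stdint.h>
#include <string.h>
#include <stdio.h>

struct cbuf {
	unsigned char *p;	/* current position */
	unsigned char *ep;	/* one past the end  */
};

/* Return 1 if len more bytes fit, 0 (and mark the overflow) otherwise. */
static int buf_check_size(struct cbuf *buf, int len)
{
	if (buf->p + len > buf->ep) {
		if (buf->p < buf->ep)
			buf->p = buf->ep + 1;	/* remember that we overflowed */
		return 0;
	}
	return 1;
}

/* Accessors only advance and write when the size check succeeds. */
static void buf_put_int32(struct cbuf *buf, uint32_t val)
{
	if (buf_check_size(buf, 4)) {
		memcpy(buf->p, &val, 4);
		buf->p += 4;
	}
}

int main(void)
{
	unsigned char raw[6];
	struct cbuf buf = { raw, raw + sizeof(raw) };

	buf_put_int32(&buf, 0x12345678);	/* fits */
	buf_put_int32(&buf, 0x9abcdef0);	/* rejected: only 2 bytes left */
	printf("overflowed: %d\n", buf.p > buf.ep);
	return 0;
}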
diff --git a/fs/9p/fid.c b/fs/9p/fid.c
index 821c9c4d76aa..d95f8626d170 100644
--- a/fs/9p/fid.c
+++ b/fs/9p/fid.c
@@ -71,21 +71,28 @@ static int v9fs_fid_insert(struct v9fs_fid *fid, struct dentry *dentry)
71 * 71 *
72 */ 72 */
73 73
74struct v9fs_fid *v9fs_fid_create(struct dentry *dentry) 74struct v9fs_fid *v9fs_fid_create(struct dentry *dentry,
75 struct v9fs_session_info *v9ses, int fid, int create)
75{ 76{
76 struct v9fs_fid *new; 77 struct v9fs_fid *new;
77 78
79 dprintk(DEBUG_9P, "fid create dentry %p, fid %d, create %d\n",
80 dentry, fid, create);
81
78 new = kmalloc(sizeof(struct v9fs_fid), GFP_KERNEL); 82 new = kmalloc(sizeof(struct v9fs_fid), GFP_KERNEL);
79 if (new == NULL) { 83 if (new == NULL) {
80 dprintk(DEBUG_ERROR, "Out of Memory\n"); 84 dprintk(DEBUG_ERROR, "Out of Memory\n");
81 return ERR_PTR(-ENOMEM); 85 return ERR_PTR(-ENOMEM);
82 } 86 }
83 87
84 new->fid = -1; 88 new->fid = fid;
89 new->v9ses = v9ses;
85 new->fidopen = 0; 90 new->fidopen = 0;
86 new->fidcreate = 0; 91 new->fidcreate = create;
87 new->fidclunked = 0; 92 new->fidclunked = 0;
88 new->iounit = 0; 93 new->iounit = 0;
94 new->rdir_pos = 0;
95 new->rdir_fcall = NULL;
89 96
90 if (v9fs_fid_insert(new, dentry) == 0) 97 if (v9fs_fid_insert(new, dentry) == 0)
91 return new; 98 return new;
@@ -109,6 +116,59 @@ void v9fs_fid_destroy(struct v9fs_fid *fid)
109} 116}
110 117
111/** 118/**
119 * v9fs_fid_walk_up - walks from the process current directory
120 * up to the specified dentry.
121 */
122static struct v9fs_fid *v9fs_fid_walk_up(struct dentry *dentry)
123{
124 int fidnum, cfidnum, err;
125 struct v9fs_fid *cfid;
126 struct dentry *cde;
127 struct v9fs_session_info *v9ses;
128
129 v9ses = v9fs_inode2v9ses(current->fs->pwd->d_inode);
130 cfid = v9fs_fid_lookup(current->fs->pwd);
131 if (cfid == NULL) {
132 dprintk(DEBUG_ERROR, "process cwd doesn't have a fid\n");
133 return ERR_PTR(-ENOENT);
134 }
135
136 cfidnum = cfid->fid;
137 cde = current->fs->pwd;
138 /* TODO: take advantage of multiwalk */
139
140 fidnum = v9fs_get_idpool(&v9ses->fidpool);
141 if (fidnum < 0) {
142 dprintk(DEBUG_ERROR, "could not get a new fid num\n");
143 err = -ENOENT;
144 goto clunk_fid;
145 }
146
147 while (cde != dentry) {
148 if (cde == cde->d_parent) {
149 dprintk(DEBUG_ERROR, "can't find dentry\n");
150 err = -ENOENT;
151 goto clunk_fid;
152 }
153
154 err = v9fs_t_walk(v9ses, cfidnum, fidnum, "..", NULL);
155 if (err < 0) {
156 dprintk(DEBUG_ERROR, "problem walking to parent\n");
157 goto clunk_fid;
158 }
159
160 cfidnum = fidnum;
161 cde = cde->d_parent;
162 }
163
164 return v9fs_fid_create(dentry, v9ses, fidnum, 0);
165
166clunk_fid:
167 v9fs_t_clunk(v9ses, fidnum, NULL);
168 return ERR_PTR(err);
169}
170
171/**
112 * v9fs_fid_lookup - retrieve the right fid from a particular dentry 172 * v9fs_fid_lookup - retrieve the right fid from a particular dentry
113 * @dentry: dentry to look for fid in 173 * @dentry: dentry to look for fid in
114 * @type: intent of lookup (operation or traversal) 174 * @type: intent of lookup (operation or traversal)
@@ -119,49 +179,25 @@ void v9fs_fid_destroy(struct v9fs_fid *fid)
119 * 179 *
120 */ 180 */
121 181
122struct v9fs_fid *v9fs_fid_lookup(struct dentry *dentry, int type) 182struct v9fs_fid *v9fs_fid_lookup(struct dentry *dentry)
123{ 183{
124 struct list_head *fid_list = (struct list_head *)dentry->d_fsdata; 184 struct list_head *fid_list = (struct list_head *)dentry->d_fsdata;
125 struct v9fs_fid *current_fid = NULL; 185 struct v9fs_fid *current_fid = NULL;
126 struct v9fs_fid *temp = NULL; 186 struct v9fs_fid *temp = NULL;
127 struct v9fs_fid *return_fid = NULL; 187 struct v9fs_fid *return_fid = NULL;
128 int found_parent = 0;
129 int found_user = 0;
130 188
131 dprintk(DEBUG_9P, " dentry: %s (%p) type %d\n", dentry->d_iname, dentry, 189 dprintk(DEBUG_9P, " dentry: %s (%p)\n", dentry->d_iname, dentry);
132 type);
133 190
134 if (fid_list && !list_empty(fid_list)) { 191 if (fid_list) {
135 list_for_each_entry_safe(current_fid, temp, fid_list, list) { 192 list_for_each_entry_safe(current_fid, temp, fid_list, list) {
136 if (current_fid->uid == current->uid) { 193 if (!current_fid->fidcreate) {
137 if (return_fid == NULL) { 194 return_fid = current_fid;
138 if ((type == FID_OP) 195 break;
139 || (!current_fid->fidopen)) {
140 return_fid = current_fid;
141 found_user = 1;
142 }
143 }
144 }
145 if (current_fid->pid == current->real_parent->pid) {
146 if ((return_fid == NULL) || (found_parent)
147 || (found_user)) {
148 if ((type == FID_OP)
149 || (!current_fid->fidopen)) {
150 return_fid = current_fid;
151 found_parent = 1;
152 found_user = 0;
153 }
154 }
155 }
156 if (current_fid->pid == current->pid) {
157 if ((type == FID_OP) ||
158 (!current_fid->fidopen)) {
159 return_fid = current_fid;
160 found_parent = 0;
161 found_user = 0;
162 }
163 } 196 }
164 } 197 }
198
199 if (!return_fid)
200 return_fid = current_fid;
165 } 201 }
166 202
167 /* we are at the root but didn't match */ 203 /* we are at the root but didn't match */
@@ -187,55 +223,33 @@ struct v9fs_fid *v9fs_fid_lookup(struct dentry *dentry, int type)
187 223
188/* XXX - there may be some duplication we can get rid of */ 224/* XXX - there may be some duplication we can get rid of */
189 if (par == dentry) { 225 if (par == dentry) {
190 /* we need to fid_lookup the starting point */ 226 return_fid = v9fs_fid_walk_up(dentry);
191 int fidnum = -1; 227 if (IS_ERR(return_fid))
192 int oldfid = -1; 228 return_fid = NULL;
193 int result = -1; 229 }
194 struct v9fs_session_info *v9ses = 230 }
195 v9fs_inode2v9ses(current->fs->pwd->d_inode);
196
197 current_fid =
198 v9fs_fid_lookup(current->fs->pwd, FID_WALK);
199 if (current_fid == NULL) {
200 dprintk(DEBUG_ERROR,
201 "process cwd doesn't have a fid\n");
202 return return_fid;
203 }
204 oldfid = current_fid->fid;
205 par = current->fs->pwd;
206 /* TODO: take advantage of multiwalk */
207 231
208 fidnum = v9fs_get_idpool(&v9ses->fidpool); 232 return return_fid;
209 if (fidnum < 0) { 233}
210 dprintk(DEBUG_ERROR,
211 "could not get a new fid num\n");
212 return return_fid;
213 }
214 234
215 while (par != dentry) { 235struct v9fs_fid *v9fs_fid_get_created(struct dentry *dentry)
216 result = 236{
217 v9fs_t_walk(v9ses, oldfid, fidnum, "..", 237 struct list_head *fid_list;
218 NULL); 238 struct v9fs_fid *fid, *ftmp, *ret;
219 if (result < 0) { 239
220 dprintk(DEBUG_ERROR, 240 dprintk(DEBUG_9P, " dentry: %s (%p)\n", dentry->d_iname, dentry);
221 "problem walking to parent\n"); 241 fid_list = (struct list_head *)dentry->d_fsdata;
222 242 ret = NULL;
223 break; 243 if (fid_list) {
224 } 244 list_for_each_entry_safe(fid, ftmp, fid_list, list) {
225 oldfid = fidnum; 245 if (fid->fidcreate && fid->pid == current->pid) {
226 if (par == par->d_parent) { 246 list_del(&fid->list);
227 dprintk(DEBUG_ERROR, 247 ret = fid;
228 "can't find dentry\n"); 248 break;
229 break;
230 }
231 par = par->d_parent;
232 }
233 if (par == dentry) {
234 return_fid = v9fs_fid_create(dentry);
235 return_fid->fid = fidnum;
236 } 249 }
237 } 250 }
238 } 251 }
239 252
240 return return_fid; 253 dprintk(DEBUG_9P, "return %p\n", ret);
254 return ret;
241} 255}
diff --git a/fs/9p/fid.h b/fs/9p/fid.h
index 7db478ccca36..84c673a44c83 100644
--- a/fs/9p/fid.h
+++ b/fs/9p/fid.h
@@ -25,6 +25,7 @@
25 25
26#define FID_OP 0 26#define FID_OP 0
27#define FID_WALK 1 27#define FID_WALK 1
28#define FID_CREATE 2
28 29
29struct v9fs_fid { 30struct v9fs_fid {
30 struct list_head list; /* list of fids associated with a dentry */ 31 struct list_head list; /* list of fids associated with a dentry */
@@ -52,6 +53,8 @@ struct v9fs_fid {
52 struct v9fs_session_info *v9ses; /* session info for this FID */ 53 struct v9fs_session_info *v9ses; /* session info for this FID */
53}; 54};
54 55
55struct v9fs_fid *v9fs_fid_lookup(struct dentry *dentry, int type); 56struct v9fs_fid *v9fs_fid_lookup(struct dentry *dentry);
57struct v9fs_fid *v9fs_fid_get_created(struct dentry *);
56void v9fs_fid_destroy(struct v9fs_fid *fid); 58void v9fs_fid_destroy(struct v9fs_fid *fid);
57struct v9fs_fid *v9fs_fid_create(struct dentry *); 59struct v9fs_fid *v9fs_fid_create(struct dentry *,
60 struct v9fs_session_info *v9ses, int fid, int create);
diff --git a/fs/9p/v9fs.c b/fs/9p/v9fs.c
index 13bdbbab4387..82303f3bf76f 100644
--- a/fs/9p/v9fs.c
+++ b/fs/9p/v9fs.c
@@ -303,7 +303,13 @@ v9fs_session_init(struct v9fs_session_info *v9ses,
303 goto SessCleanUp; 303 goto SessCleanUp;
304 }; 304 };
305 305
306 v9ses->transport = trans_proto; 306 v9ses->transport = kmalloc(sizeof(*v9ses->transport), GFP_KERNEL);
307 if (!v9ses->transport) {
308 retval = -ENOMEM;
309 goto SessCleanUp;
310 }
311
312 memmove(v9ses->transport, trans_proto, sizeof(*v9ses->transport));
307 313
308 if ((retval = v9ses->transport->init(v9ses, dev_name, data)) < 0) { 314 if ((retval = v9ses->transport->init(v9ses, dev_name, data)) < 0) {
309 eprintk(KERN_ERR, "problem initializing transport\n"); 315 eprintk(KERN_ERR, "problem initializing transport\n");
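
The hunk above stops aliasing the shared trans_proto template and gives each session its own heap copy, so per-mount transport state can no longer leak between sessions. Below is a minimal user-space sketch of the same copy-the-template pattern; the struct and helper names are invented for illustration and are not the v9fs ones.

#include <stdlib.h>
#include <string.h>

struct transport_ops {
	int (*init)(void *priv);
	void *priv;                     /* per-session state */
};

/* Shared, read-only template describing the protocol. */
static const struct transport_ops tcp_template = { .init = NULL, .priv = NULL };

/* Give each session its own mutable copy of the template. */
static struct transport_ops *session_transport_alloc(void)
{
	struct transport_ops *t = malloc(sizeof(*t));
	if (!t)
		return NULL;            /* -ENOMEM in the kernel version */
	memcpy(t, &tcp_template, sizeof(*t));
	return t;                       /* caller may now set t->priv freely */
}

int main(void)
{
	struct transport_ops *t = session_transport_alloc();
	free(t);
	return 0;
}
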
diff --git a/fs/9p/vfs_dentry.c b/fs/9p/vfs_dentry.c
index 306c96741f81..a6aa947de0f9 100644
--- a/fs/9p/vfs_dentry.c
+++ b/fs/9p/vfs_dentry.c
@@ -67,7 +67,7 @@ static int v9fs_dentry_validate(struct dentry *dentry, struct nameidata *nd)
67 struct dentry *dc = current->fs->pwd; 67 struct dentry *dc = current->fs->pwd;
68 68
69 dprintk(DEBUG_VFS, "dentry: %s (%p)\n", dentry->d_iname, dentry); 69 dprintk(DEBUG_VFS, "dentry: %s (%p)\n", dentry->d_iname, dentry);
70 if (v9fs_fid_lookup(dentry, FID_OP)) { 70 if (v9fs_fid_lookup(dentry)) {
71 dprintk(DEBUG_VFS, "VALID\n"); 71 dprintk(DEBUG_VFS, "VALID\n");
72 return 1; 72 return 1;
73 } 73 }
diff --git a/fs/9p/vfs_dir.c b/fs/9p/vfs_dir.c
index c478a7384186..57a43b8feef5 100644
--- a/fs/9p/vfs_dir.c
+++ b/fs/9p/vfs_dir.c
@@ -197,21 +197,18 @@ int v9fs_dir_release(struct inode *inode, struct file *filp)
197 filemap_fdatawait(inode->i_mapping); 197 filemap_fdatawait(inode->i_mapping);
198 198
199 if (fidnum >= 0) { 199 if (fidnum >= 0) {
200 fid->fidopen--;
201 dprintk(DEBUG_VFS, "fidopen: %d v9f->fid: %d\n", fid->fidopen, 200 dprintk(DEBUG_VFS, "fidopen: %d v9f->fid: %d\n", fid->fidopen,
202 fid->fid); 201 fid->fid);
203 202
204 if (fid->fidopen == 0) { 203 if (v9fs_t_clunk(v9ses, fidnum, NULL))
205 if (v9fs_t_clunk(v9ses, fidnum, NULL)) 204 dprintk(DEBUG_ERROR, "clunk failed\n");
206 dprintk(DEBUG_ERROR, "clunk failed\n");
207 205
208 v9fs_put_idpool(fid->fid, &v9ses->fidpool); 206 v9fs_put_idpool(fid->fid, &v9ses->fidpool);
209 }
210 207
211 kfree(fid->rdir_fcall); 208 kfree(fid->rdir_fcall);
209 kfree(fid);
212 210
213 filp->private_data = NULL; 211 filp->private_data = NULL;
214 v9fs_fid_destroy(fid);
215 } 212 }
216 213
217 d_drop(filp->f_dentry); 214 d_drop(filp->f_dentry);
diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c
index 1f8ae7d580ab..a4799e971d1c 100644
--- a/fs/9p/vfs_file.c
+++ b/fs/9p/vfs_file.c
@@ -53,30 +53,36 @@
53int v9fs_file_open(struct inode *inode, struct file *file) 53int v9fs_file_open(struct inode *inode, struct file *file)
54{ 54{
55 struct v9fs_session_info *v9ses = v9fs_inode2v9ses(inode); 55 struct v9fs_session_info *v9ses = v9fs_inode2v9ses(inode);
56 struct v9fs_fid *v9fid = v9fs_fid_lookup(file->f_dentry, FID_WALK); 56 struct v9fs_fid *v9fid, *fid;
57 struct v9fs_fid *v9newfid = NULL;
58 struct v9fs_fcall *fcall = NULL; 57 struct v9fs_fcall *fcall = NULL;
59 int open_mode = 0; 58 int open_mode = 0;
60 unsigned int iounit = 0; 59 unsigned int iounit = 0;
61 int newfid = -1; 60 int newfid = -1;
62 long result = -1; 61 long result = -1;
63 62
64 dprintk(DEBUG_VFS, "inode: %p file: %p v9fid= %p\n", inode, file, 63 dprintk(DEBUG_VFS, "inode: %p file: %p \n", inode, file);
65 v9fid); 64
65 v9fid = v9fs_fid_get_created(file->f_dentry);
66 if (!v9fid)
67 v9fid = v9fs_fid_lookup(file->f_dentry);
66 68
67 if (!v9fid) { 69 if (!v9fid) {
68 struct dentry *dentry = file->f_dentry;
69 dprintk(DEBUG_ERROR, "Couldn't resolve fid from dentry\n"); 70 dprintk(DEBUG_ERROR, "Couldn't resolve fid from dentry\n");
71 return -EBADF;
72 }
70 73
71 /* XXX - some duplication from lookup, generalize later */ 74 if (!v9fid->fidcreate) {
72 /* basically vfs_lookup is too heavy weight */ 75 fid = kmalloc(sizeof(struct v9fs_fid), GFP_KERNEL);
73 v9fid = v9fs_fid_lookup(file->f_dentry, FID_OP); 76 if (fid == NULL) {
74 if (!v9fid) 77 dprintk(DEBUG_ERROR, "Out of Memory\n");
75 return -EBADF; 78 return -ENOMEM;
79 }
76 80
77 v9fid = v9fs_fid_lookup(dentry->d_parent, FID_WALK); 81 fid->fidopen = 0;
78 if (!v9fid) 82 fid->fidcreate = 0;
79 return -EBADF; 83 fid->fidclunked = 0;
84 fid->iounit = 0;
85 fid->v9ses = v9ses;
80 86
81 newfid = v9fs_get_idpool(&v9ses->fidpool); 87 newfid = v9fs_get_idpool(&v9ses->fidpool);
82 if (newfid < 0) { 88 if (newfid < 0) {
@@ -85,58 +91,16 @@ int v9fs_file_open(struct inode *inode, struct file *file)
85 } 91 }
86 92
87 result = 93 result =
88 v9fs_t_walk(v9ses, v9fid->fid, newfid, 94 v9fs_t_walk(v9ses, v9fid->fid, newfid, NULL, NULL);
89 (char *)file->f_dentry->d_name.name, NULL); 95
90 if (result < 0) { 96 if (result < 0) {
91 v9fs_put_idpool(newfid, &v9ses->fidpool); 97 v9fs_put_idpool(newfid, &v9ses->fidpool);
92 dprintk(DEBUG_ERROR, "rewalk didn't work\n"); 98 dprintk(DEBUG_ERROR, "rewalk didn't work\n");
93 return -EBADF; 99 return -EBADF;
94 } 100 }
95 101
96 v9fid = v9fs_fid_create(dentry); 102 fid->fid = newfid;
97 if (v9fid == NULL) { 103 v9fid = fid;
98 dprintk(DEBUG_ERROR, "couldn't insert\n");
99 return -ENOMEM;
100 }
101 v9fid->fid = newfid;
102 }
103
104 if (v9fid->fidcreate) {
105 /* create case */
106 newfid = v9fid->fid;
107 iounit = v9fid->iounit;
108 v9fid->fidcreate = 0;
109 } else {
110 if (!S_ISDIR(inode->i_mode))
111 newfid = v9fid->fid;
112 else {
113 newfid = v9fs_get_idpool(&v9ses->fidpool);
114 if (newfid < 0) {
115 eprintk(KERN_WARNING, "allocation failed\n");
116 return -ENOSPC;
117 }
118 /* This would be a somewhat critical clone */
119 result =
120 v9fs_t_walk(v9ses, v9fid->fid, newfid, NULL,
121 &fcall);
122 if (result < 0) {
123 dprintk(DEBUG_ERROR, "clone error: %s\n",
124 FCALL_ERROR(fcall));
125 kfree(fcall);
126 return result;
127 }
128
129 v9newfid = v9fs_fid_create(file->f_dentry);
130 v9newfid->fid = newfid;
131 v9newfid->qid = v9fid->qid;
132 v9newfid->iounit = v9fid->iounit;
133 v9newfid->fidopen = 0;
134 v9newfid->fidclunked = 0;
135 v9newfid->v9ses = v9ses;
136 v9fid = v9newfid;
137 kfree(fcall);
138 }
139
140 /* TODO: do special things for O_EXCL, O_NOFOLLOW, O_SYNC */ 104 /* TODO: do special things for O_EXCL, O_NOFOLLOW, O_SYNC */
141 /* translate open mode appropriately */ 105 /* translate open mode appropriately */
142 open_mode = file->f_flags & 0x3; 106 open_mode = file->f_flags & 0x3;
@@ -163,9 +127,13 @@ int v9fs_file_open(struct inode *inode, struct file *file)
163 127
164 iounit = fcall->params.ropen.iounit; 128 iounit = fcall->params.ropen.iounit;
165 kfree(fcall); 129 kfree(fcall);
130 } else {
131 /* create case */
132 newfid = v9fid->fid;
133 iounit = v9fid->iounit;
134 v9fid->fidcreate = 0;
166 } 135 }
167 136
168
169 file->private_data = v9fid; 137 file->private_data = v9fid;
170 138
171 v9fid->rdir_pos = 0; 139 v9fid->rdir_pos = 0;
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
index 0c13fc600049..2b696ae6655a 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
@@ -307,7 +307,7 @@ v9fs_create(struct inode *dir,
307 struct v9fs_session_info *v9ses = v9fs_inode2v9ses(dir); 307 struct v9fs_session_info *v9ses = v9fs_inode2v9ses(dir);
308 struct super_block *sb = dir->i_sb; 308 struct super_block *sb = dir->i_sb;
309 struct v9fs_fid *dirfid = 309 struct v9fs_fid *dirfid =
310 v9fs_fid_lookup(file_dentry->d_parent, FID_WALK); 310 v9fs_fid_lookup(file_dentry->d_parent);
311 struct v9fs_fid *fid = NULL; 311 struct v9fs_fid *fid = NULL;
312 struct inode *file_inode = NULL; 312 struct inode *file_inode = NULL;
313 struct v9fs_fcall *fcall = NULL; 313 struct v9fs_fcall *fcall = NULL;
@@ -317,6 +317,7 @@ v9fs_create(struct inode *dir,
317 long newfid = -1; 317 long newfid = -1;
318 int result = 0; 318 int result = 0;
319 unsigned int iounit = 0; 319 unsigned int iounit = 0;
320 int wfidno = -1;
320 321
321 perm = unixmode2p9mode(v9ses, perm); 322 perm = unixmode2p9mode(v9ses, perm);
322 323
@@ -350,7 +351,7 @@ v9fs_create(struct inode *dir,
350 if (result < 0) { 351 if (result < 0) {
351 dprintk(DEBUG_ERROR, "clone error: %s\n", FCALL_ERROR(fcall)); 352 dprintk(DEBUG_ERROR, "clone error: %s\n", FCALL_ERROR(fcall));
352 v9fs_put_idpool(newfid, &v9ses->fidpool); 353 v9fs_put_idpool(newfid, &v9ses->fidpool);
353 newfid = 0; 354 newfid = -1;
354 goto CleanUpFid; 355 goto CleanUpFid;
355 } 356 }
356 357
@@ -369,20 +370,39 @@ v9fs_create(struct inode *dir,
369 qid = fcall->params.rcreate.qid; 370 qid = fcall->params.rcreate.qid;
370 kfree(fcall); 371 kfree(fcall);
371 372
372 fid = v9fs_fid_create(file_dentry); 373 fid = v9fs_fid_create(file_dentry, v9ses, newfid, 1);
374 dprintk(DEBUG_VFS, "fid %p %d\n", fid, fid->fidcreate);
373 if (!fid) { 375 if (!fid) {
374 result = -ENOMEM; 376 result = -ENOMEM;
375 goto CleanUpFid; 377 goto CleanUpFid;
376 } 378 }
377 379
378 fid->fid = newfid;
379 fid->fidopen = 0;
380 fid->fidcreate = 1;
381 fid->qid = qid; 380 fid->qid = qid;
382 fid->iounit = iounit; 381 fid->iounit = iounit;
383 fid->rdir_pos = 0; 382
384 fid->rdir_fcall = NULL; 383 /* walk to the newly created file and put the fid in the dentry */
385 fid->v9ses = v9ses; 384 wfidno = v9fs_get_idpool(&v9ses->fidpool);
385 if (newfid < 0) {
386 eprintk(KERN_WARNING, "no free fids available\n");
387 return -ENOSPC;
388 }
389
390 result = v9fs_t_walk(v9ses, dirfidnum, wfidno,
391 (char *) file_dentry->d_name.name, NULL);
392 if (result < 0) {
393 dprintk(DEBUG_ERROR, "clone error: %s\n", FCALL_ERROR(fcall));
394 v9fs_put_idpool(wfidno, &v9ses->fidpool);
395 wfidno = -1;
396 goto CleanUpFid;
397 }
398
399 if (!v9fs_fid_create(file_dentry, v9ses, wfidno, 0)) {
400 if (!v9fs_t_clunk(v9ses, newfid, &fcall)) {
401 v9fs_put_idpool(wfidno, &v9ses->fidpool);
402 }
403
404 goto CleanUpFid;
405 }
386 406
387 if ((perm & V9FS_DMSYMLINK) || (perm & V9FS_DMLINK) || 407 if ((perm & V9FS_DMSYMLINK) || (perm & V9FS_DMLINK) ||
388 (perm & V9FS_DMNAMEDPIPE) || (perm & V9FS_DMSOCKET) || 408 (perm & V9FS_DMNAMEDPIPE) || (perm & V9FS_DMSOCKET) ||
@@ -410,11 +430,11 @@ v9fs_create(struct inode *dir,
410 d_instantiate(file_dentry, file_inode); 430 d_instantiate(file_dentry, file_inode);
411 431
412 if (perm & V9FS_DMDIR) { 432 if (perm & V9FS_DMDIR) {
413 if (v9fs_t_clunk(v9ses, newfid, &fcall)) 433 if (!v9fs_t_clunk(v9ses, newfid, &fcall))
434 v9fs_put_idpool(newfid, &v9ses->fidpool);
435 else
414 dprintk(DEBUG_ERROR, "clunk for mkdir failed: %s\n", 436 dprintk(DEBUG_ERROR, "clunk for mkdir failed: %s\n",
415 FCALL_ERROR(fcall)); 437 FCALL_ERROR(fcall));
416
417 v9fs_put_idpool(newfid, &v9ses->fidpool);
418 kfree(fcall); 438 kfree(fcall);
419 fid->fidopen = 0; 439 fid->fidopen = 0;
420 fid->fidcreate = 0; 440 fid->fidcreate = 0;
@@ -426,12 +446,22 @@ v9fs_create(struct inode *dir,
426 CleanUpFid: 446 CleanUpFid:
427 kfree(fcall); 447 kfree(fcall);
428 448
429 if (newfid) { 449 if (newfid >= 0) {
430 if (v9fs_t_clunk(v9ses, newfid, &fcall)) 450 if (!v9fs_t_clunk(v9ses, newfid, &fcall))
451 v9fs_put_idpool(newfid, &v9ses->fidpool);
452 else
453 dprintk(DEBUG_ERROR, "clunk failed: %s\n",
454 FCALL_ERROR(fcall));
455
456 kfree(fcall);
457 }
458 if (wfidno >= 0) {
459 if (!v9fs_t_clunk(v9ses, wfidno, &fcall))
460 v9fs_put_idpool(wfidno, &v9ses->fidpool);
461 else
431 dprintk(DEBUG_ERROR, "clunk failed: %s\n", 462 dprintk(DEBUG_ERROR, "clunk failed: %s\n",
432 FCALL_ERROR(fcall)); 463 FCALL_ERROR(fcall));
433 464
434 v9fs_put_idpool(newfid, &v9ses->fidpool);
435 kfree(fcall); 465 kfree(fcall);
436 } 466 }
437 return result; 467 return result;
@@ -461,7 +491,7 @@ static int v9fs_remove(struct inode *dir, struct dentry *file, int rmdir)
461 file_inode = file->d_inode; 491 file_inode = file->d_inode;
462 sb = file_inode->i_sb; 492 sb = file_inode->i_sb;
463 v9ses = v9fs_inode2v9ses(file_inode); 493 v9ses = v9fs_inode2v9ses(file_inode);
464 v9fid = v9fs_fid_lookup(file, FID_OP); 494 v9fid = v9fs_fid_lookup(file);
465 495
466 if (!v9fid) { 496 if (!v9fid) {
467 dprintk(DEBUG_ERROR, 497 dprintk(DEBUG_ERROR,
@@ -545,7 +575,7 @@ static struct dentry *v9fs_vfs_lookup(struct inode *dir, struct dentry *dentry,
545 575
546 sb = dir->i_sb; 576 sb = dir->i_sb;
547 v9ses = v9fs_inode2v9ses(dir); 577 v9ses = v9fs_inode2v9ses(dir);
548 dirfid = v9fs_fid_lookup(dentry->d_parent, FID_WALK); 578 dirfid = v9fs_fid_lookup(dentry->d_parent);
549 579
550 if (!dirfid) { 580 if (!dirfid) {
551 dprintk(DEBUG_ERROR, "no dirfid\n"); 581 dprintk(DEBUG_ERROR, "no dirfid\n");
@@ -573,7 +603,7 @@ static struct dentry *v9fs_vfs_lookup(struct inode *dir, struct dentry *dentry,
573 v9fs_put_idpool(newfid, &v9ses->fidpool); 603 v9fs_put_idpool(newfid, &v9ses->fidpool);
574 if (result == -ENOENT) { 604 if (result == -ENOENT) {
575 d_add(dentry, NULL); 605 d_add(dentry, NULL);
576 dprintk(DEBUG_ERROR, 606 dprintk(DEBUG_VFS,
577 "Return negative dentry %p count %d\n", 607 "Return negative dentry %p count %d\n",
578 dentry, atomic_read(&dentry->d_count)); 608 dentry, atomic_read(&dentry->d_count));
579 return NULL; 609 return NULL;
@@ -601,16 +631,13 @@ static struct dentry *v9fs_vfs_lookup(struct inode *dir, struct dentry *dentry,
601 631
602 inode->i_ino = v9fs_qid2ino(&fcall->params.rstat.stat->qid); 632 inode->i_ino = v9fs_qid2ino(&fcall->params.rstat.stat->qid);
603 633
604 fid = v9fs_fid_create(dentry); 634 fid = v9fs_fid_create(dentry, v9ses, newfid, 0);
605 if (fid == NULL) { 635 if (fid == NULL) {
606 dprintk(DEBUG_ERROR, "couldn't insert\n"); 636 dprintk(DEBUG_ERROR, "couldn't insert\n");
607 result = -ENOMEM; 637 result = -ENOMEM;
608 goto FreeFcall; 638 goto FreeFcall;
609 } 639 }
610 640
611 fid->fid = newfid;
612 fid->fidopen = 0;
613 fid->v9ses = v9ses;
614 fid->qid = fcall->params.rstat.stat->qid; 641 fid->qid = fcall->params.rstat.stat->qid;
615 642
616 dentry->d_op = &v9fs_dentry_operations; 643 dentry->d_op = &v9fs_dentry_operations;
@@ -665,11 +692,11 @@ v9fs_vfs_rename(struct inode *old_dir, struct dentry *old_dentry,
665{ 692{
666 struct inode *old_inode = old_dentry->d_inode; 693 struct inode *old_inode = old_dentry->d_inode;
667 struct v9fs_session_info *v9ses = v9fs_inode2v9ses(old_inode); 694 struct v9fs_session_info *v9ses = v9fs_inode2v9ses(old_inode);
668 struct v9fs_fid *oldfid = v9fs_fid_lookup(old_dentry, FID_WALK); 695 struct v9fs_fid *oldfid = v9fs_fid_lookup(old_dentry);
669 struct v9fs_fid *olddirfid = 696 struct v9fs_fid *olddirfid =
670 v9fs_fid_lookup(old_dentry->d_parent, FID_WALK); 697 v9fs_fid_lookup(old_dentry->d_parent);
671 struct v9fs_fid *newdirfid = 698 struct v9fs_fid *newdirfid =
672 v9fs_fid_lookup(new_dentry->d_parent, FID_WALK); 699 v9fs_fid_lookup(new_dentry->d_parent);
673 struct v9fs_stat *mistat = kmalloc(v9ses->maxdata, GFP_KERNEL); 700 struct v9fs_stat *mistat = kmalloc(v9ses->maxdata, GFP_KERNEL);
674 struct v9fs_fcall *fcall = NULL; 701 struct v9fs_fcall *fcall = NULL;
675 int fid = -1; 702 int fid = -1;
@@ -744,7 +771,7 @@ v9fs_vfs_getattr(struct vfsmount *mnt, struct dentry *dentry,
744{ 771{
745 struct v9fs_fcall *fcall = NULL; 772 struct v9fs_fcall *fcall = NULL;
746 struct v9fs_session_info *v9ses = v9fs_inode2v9ses(dentry->d_inode); 773 struct v9fs_session_info *v9ses = v9fs_inode2v9ses(dentry->d_inode);
747 struct v9fs_fid *fid = v9fs_fid_lookup(dentry, FID_OP); 774 struct v9fs_fid *fid = v9fs_fid_lookup(dentry);
748 int err = -EPERM; 775 int err = -EPERM;
749 776
750 dprintk(DEBUG_VFS, "dentry: %p\n", dentry); 777 dprintk(DEBUG_VFS, "dentry: %p\n", dentry);
@@ -778,7 +805,7 @@ v9fs_vfs_getattr(struct vfsmount *mnt, struct dentry *dentry,
778static int v9fs_vfs_setattr(struct dentry *dentry, struct iattr *iattr) 805static int v9fs_vfs_setattr(struct dentry *dentry, struct iattr *iattr)
779{ 806{
780 struct v9fs_session_info *v9ses = v9fs_inode2v9ses(dentry->d_inode); 807 struct v9fs_session_info *v9ses = v9fs_inode2v9ses(dentry->d_inode);
781 struct v9fs_fid *fid = v9fs_fid_lookup(dentry, FID_OP); 808 struct v9fs_fid *fid = v9fs_fid_lookup(dentry);
782 struct v9fs_fcall *fcall = NULL; 809 struct v9fs_fcall *fcall = NULL;
783 struct v9fs_stat *mistat = kmalloc(v9ses->maxdata, GFP_KERNEL); 810 struct v9fs_stat *mistat = kmalloc(v9ses->maxdata, GFP_KERNEL);
784 int res = -EPERM; 811 int res = -EPERM;
@@ -960,7 +987,7 @@ v9fs_vfs_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
960 if (retval != 0) 987 if (retval != 0)
961 goto FreeFcall; 988 goto FreeFcall;
962 989
963 newfid = v9fs_fid_lookup(dentry, FID_OP); 990 newfid = v9fs_fid_lookup(dentry);
964 991
965 /* issue a twstat */ 992 /* issue a twstat */
966 v9fs_blank_mistat(v9ses, mistat); 993 v9fs_blank_mistat(v9ses, mistat);
@@ -1004,7 +1031,7 @@ static int v9fs_readlink(struct dentry *dentry, char *buffer, int buflen)
1004 1031
1005 struct v9fs_fcall *fcall = NULL; 1032 struct v9fs_fcall *fcall = NULL;
1006 struct v9fs_session_info *v9ses = v9fs_inode2v9ses(dentry->d_inode); 1033 struct v9fs_session_info *v9ses = v9fs_inode2v9ses(dentry->d_inode);
1007 struct v9fs_fid *fid = v9fs_fid_lookup(dentry, FID_OP); 1034 struct v9fs_fid *fid = v9fs_fid_lookup(dentry);
1008 1035
1009 if (!fid) { 1036 if (!fid) {
1010 dprintk(DEBUG_ERROR, "could not resolve fid from dentry\n"); 1037 dprintk(DEBUG_ERROR, "could not resolve fid from dentry\n");
@@ -1063,8 +1090,8 @@ static int v9fs_vfs_readlink(struct dentry *dentry, char __user * buffer,
1063 int ret; 1090 int ret;
1064 char *link = __getname(); 1091 char *link = __getname();
1065 1092
1066 if (strlen(link) < buflen) 1093 if (buflen > PATH_MAX)
1067 buflen = strlen(link); 1094 buflen = PATH_MAX;
1068 1095
1069 dprintk(DEBUG_VFS, " dentry: %s (%p)\n", dentry->d_iname, dentry); 1096 dprintk(DEBUG_VFS, " dentry: %s (%p)\n", dentry->d_iname, dentry);
1070 1097
@@ -1148,7 +1175,7 @@ v9fs_vfs_link(struct dentry *old_dentry, struct inode *dir,
1148 struct v9fs_session_info *v9ses = v9fs_inode2v9ses(dir); 1175 struct v9fs_session_info *v9ses = v9fs_inode2v9ses(dir);
1149 struct v9fs_fcall *fcall = NULL; 1176 struct v9fs_fcall *fcall = NULL;
1150 struct v9fs_stat *mistat = kmalloc(v9ses->maxdata, GFP_KERNEL); 1177 struct v9fs_stat *mistat = kmalloc(v9ses->maxdata, GFP_KERNEL);
1151 struct v9fs_fid *oldfid = v9fs_fid_lookup(old_dentry, FID_OP); 1178 struct v9fs_fid *oldfid = v9fs_fid_lookup(old_dentry);
1152 struct v9fs_fid *newfid = NULL; 1179 struct v9fs_fid *newfid = NULL;
1153 char *symname = __getname(); 1180 char *symname = __getname();
1154 1181
@@ -1168,7 +1195,7 @@ v9fs_vfs_link(struct dentry *old_dentry, struct inode *dir,
1168 if (retval != 0) 1195 if (retval != 0)
1169 goto FreeMem; 1196 goto FreeMem;
1170 1197
1171 newfid = v9fs_fid_lookup(dentry, FID_OP); 1198 newfid = v9fs_fid_lookup(dentry);
1172 if (!newfid) { 1199 if (!newfid) {
1173 dprintk(DEBUG_ERROR, "couldn't resolve fid from dentry\n"); 1200 dprintk(DEBUG_ERROR, "couldn't resolve fid from dentry\n");
1174 goto FreeMem; 1201 goto FreeMem;
@@ -1246,7 +1273,7 @@ v9fs_vfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t rdev)
1246 if (retval != 0) 1273 if (retval != 0)
1247 goto FreeMem; 1274 goto FreeMem;
1248 1275
1249 newfid = v9fs_fid_lookup(dentry, FID_OP); 1276 newfid = v9fs_fid_lookup(dentry);
1250 if (!newfid) { 1277 if (!newfid) {
1251 dprintk(DEBUG_ERROR, "couldn't resolve fid from dentry\n"); 1278
1252 retval = -EINVAL; 1279 retval = -EINVAL;
diff --git a/fs/9p/vfs_super.c b/fs/9p/vfs_super.c
index 868f350b2c5f..82c5b0084079 100644
--- a/fs/9p/vfs_super.c
+++ b/fs/9p/vfs_super.c
@@ -129,8 +129,7 @@ static struct super_block *v9fs_get_sb(struct file_system_type
129 129
130 if ((newfid = v9fs_session_init(v9ses, dev_name, data)) < 0) { 130 if ((newfid = v9fs_session_init(v9ses, dev_name, data)) < 0) {
131 dprintk(DEBUG_ERROR, "problem initiating session\n"); 131 dprintk(DEBUG_ERROR, "problem initiating session\n");
132 retval = newfid; 132 return ERR_PTR(newfid);
133 goto free_session;
134 } 133 }
135 134
136 sb = sget(fs_type, NULL, v9fs_set_super, v9ses); 135 sb = sget(fs_type, NULL, v9fs_set_super, v9ses);
@@ -150,28 +149,24 @@ static struct super_block *v9fs_get_sb(struct file_system_type
150 149
151 if (!root) { 150 if (!root) {
152 retval = -ENOMEM; 151 retval = -ENOMEM;
153 goto release_inode; 152 goto put_back_sb;
154 } 153 }
155 154
156 sb->s_root = root; 155 sb->s_root = root;
157 156
158 /* Setup the Root Inode */
159 root_fid = v9fs_fid_create(root);
160 if (root_fid == NULL) {
161 retval = -ENOMEM;
162 goto release_dentry;
163 }
164
165 root_fid->fidopen = 0;
166 root_fid->v9ses = v9ses;
167
168 stat_result = v9fs_t_stat(v9ses, newfid, &fcall); 157 stat_result = v9fs_t_stat(v9ses, newfid, &fcall);
169 if (stat_result < 0) { 158 if (stat_result < 0) {
170 dprintk(DEBUG_ERROR, "stat error\n"); 159 dprintk(DEBUG_ERROR, "stat error\n");
171 v9fs_t_clunk(v9ses, newfid, NULL); 160 v9fs_t_clunk(v9ses, newfid, NULL);
172 v9fs_put_idpool(newfid, &v9ses->fidpool); 161 v9fs_put_idpool(newfid, &v9ses->fidpool);
173 } else { 162 } else {
174 root_fid->fid = newfid; 163 /* Setup the Root Inode */
164 root_fid = v9fs_fid_create(root, v9ses, newfid, 0);
165 if (root_fid == NULL) {
166 retval = -ENOMEM;
167 goto put_back_sb;
168 }
169
175 root_fid->qid = fcall->params.rstat.stat->qid; 170 root_fid->qid = fcall->params.rstat.stat->qid;
176 root->d_inode->i_ino = 171 root->d_inode->i_ino =
177 v9fs_qid2ino(&fcall->params.rstat.stat->qid); 172 v9fs_qid2ino(&fcall->params.rstat.stat->qid);
@@ -182,25 +177,15 @@ static struct super_block *v9fs_get_sb(struct file_system_type
182 177
183 if (stat_result < 0) { 178 if (stat_result < 0) {
184 retval = stat_result; 179 retval = stat_result;
185 goto release_dentry; 180 goto put_back_sb;
186 } 181 }
187 182
188 return sb; 183 return sb;
189 184
190 release_dentry: 185put_back_sb:
191 dput(sb->s_root); 186 /* deactivate_super calls v9fs_kill_super which will free the rest */
192
193 release_inode:
194 iput(inode);
195
196 put_back_sb:
197 up_write(&sb->s_umount); 187 up_write(&sb->s_umount);
198 deactivate_super(sb); 188 deactivate_super(sb);
199 v9fs_session_close(v9ses);
200
201 free_session:
202 kfree(v9ses);
203
204 return ERR_PTR(retval); 189 return ERR_PTR(retval);
205} 190}
206 191
diff --git a/fs/Kconfig b/fs/Kconfig
index 068ccea2f184..48f5422cb19a 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -472,6 +472,9 @@ config FUSE_FS
472 utilities is available from the FUSE homepage: 472 utilities is available from the FUSE homepage:
473 <http://fuse.sourceforge.net/> 473 <http://fuse.sourceforge.net/>
474 474
475 See <file:Documentation/filesystems/fuse.txt> for more information.
476 See <file:Documentation/Changes> for needed library/utility version.
477
475 If you want to develop a userspace FS, or if you want to use 478 If you want to develop a userspace FS, or if you want to use
476 a filesystem based on FUSE, answer Y or M. 479 a filesystem based on FUSE, answer Y or M.
477 480
diff --git a/fs/aio.c b/fs/aio.c
index 0e11e31dbb77..d6b1551342b7 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -741,19 +741,9 @@ static ssize_t aio_run_iocb(struct kiocb *iocb)
741 ret = retry(iocb); 741 ret = retry(iocb);
742 current->io_wait = NULL; 742 current->io_wait = NULL;
743 743
744 if (-EIOCBRETRY != ret) { 744 if (ret != -EIOCBRETRY && ret != -EIOCBQUEUED) {
745 if (-EIOCBQUEUED != ret) { 745 BUG_ON(!list_empty(&iocb->ki_wait.task_list));
746 BUG_ON(!list_empty(&iocb->ki_wait.task_list)); 746 aio_complete(iocb, ret, 0);
747 aio_complete(iocb, ret, 0);
748 /* must not access the iocb after this */
749 }
750 } else {
751 /*
752 * Issue an additional retry to avoid waiting forever if
753 * no waits were queued (e.g. in case of a short read).
754 */
755 if (list_empty(&iocb->ki_wait.task_list))
756 kiocbSetKicked(iocb);
757 } 747 }
758out: 748out:
759 spin_lock_irq(&ctx->ctx_lock); 749 spin_lock_irq(&ctx->ctx_lock);
@@ -899,16 +889,24 @@ static void aio_kick_handler(void *data)
899 * and if required activate the aio work queue to process 889 * and if required activate the aio work queue to process
900 * it 890 * it
901 */ 891 */
902static void queue_kicked_iocb(struct kiocb *iocb) 892static void try_queue_kicked_iocb(struct kiocb *iocb)
903{ 893{
904 struct kioctx *ctx = iocb->ki_ctx; 894 struct kioctx *ctx = iocb->ki_ctx;
905 unsigned long flags; 895 unsigned long flags;
906 int run = 0; 896 int run = 0;
907 897
908 WARN_ON((!list_empty(&iocb->ki_wait.task_list))); 898 /* We're supposed to be the only path putting the iocb back on the run
899 * list. If we find that the iocb is *back* on a wait queue already
 900 * then retry has happened before we could queue the iocb. This also
901 * means that the retry could have completed and freed our iocb, no
902 * good. */
903 BUG_ON((!list_empty(&iocb->ki_wait.task_list)));
909 904
910 spin_lock_irqsave(&ctx->ctx_lock, flags); 905 spin_lock_irqsave(&ctx->ctx_lock, flags);
911 run = __queue_kicked_iocb(iocb); 906 /* set this inside the lock so that we can't race with aio_run_iocb()
907 * testing it and putting the iocb on the run list under the lock */
908 if (!kiocbTryKick(iocb))
909 run = __queue_kicked_iocb(iocb);
912 spin_unlock_irqrestore(&ctx->ctx_lock, flags); 910 spin_unlock_irqrestore(&ctx->ctx_lock, flags);
913 if (run) 911 if (run)
914 aio_queue_work(ctx); 912 aio_queue_work(ctx);
@@ -931,10 +929,7 @@ void fastcall kick_iocb(struct kiocb *iocb)
931 return; 929 return;
932 } 930 }
933 931
934 /* If its already kicked we shouldn't queue it again */ 932 try_queue_kicked_iocb(iocb);
935 if (!kiocbTryKick(iocb)) {
936 queue_kicked_iocb(iocb);
937 }
938} 933}
939EXPORT_SYMBOL(kick_iocb); 934EXPORT_SYMBOL(kick_iocb);
940 935
@@ -1322,8 +1317,11 @@ asmlinkage long sys_io_destroy(aio_context_t ctx)
1322} 1317}
1323 1318
1324/* 1319/*
1325 * Default retry method for aio_read (also used for first time submit) 1320 * aio_p{read,write} are the default ki_retry methods for
1326 * Responsible for updating iocb state as retries progress 1321 * IO_CMD_P{READ,WRITE}. They maintain kiocb retry state around potentially
1322 * multiple calls to f_op->aio_read(). They loop around partial progress
1323 * instead of returning -EIOCBRETRY because they don't have the means to call
1324 * kick_iocb().
1327 */ 1325 */
1328static ssize_t aio_pread(struct kiocb *iocb) 1326static ssize_t aio_pread(struct kiocb *iocb)
1329{ 1327{
@@ -1332,25 +1330,25 @@ static ssize_t aio_pread(struct kiocb *iocb)
1332 struct inode *inode = mapping->host; 1330 struct inode *inode = mapping->host;
1333 ssize_t ret = 0; 1331 ssize_t ret = 0;
1334 1332
1335 ret = file->f_op->aio_read(iocb, iocb->ki_buf, 1333 do {
1336 iocb->ki_left, iocb->ki_pos); 1334 ret = file->f_op->aio_read(iocb, iocb->ki_buf,
1335 iocb->ki_left, iocb->ki_pos);
1336 /*
1337 * Can't just depend on iocb->ki_left to determine
1338 * whether we are done. This may have been a short read.
1339 */
1340 if (ret > 0) {
1341 iocb->ki_buf += ret;
1342 iocb->ki_left -= ret;
1343 }
1337 1344
1338 /*
1339 * Can't just depend on iocb->ki_left to determine
1340 * whether we are done. This may have been a short read.
1341 */
1342 if (ret > 0) {
1343 iocb->ki_buf += ret;
1344 iocb->ki_left -= ret;
1345 /* 1345 /*
1346 * For pipes and sockets we return once we have 1346 * For pipes and sockets we return once we have some data; for
1347 * some data; for regular files we retry till we 1347 * regular files we retry till we complete the entire read or
1348 * complete the entire read or find that we can't 1348 * find that we can't read any more data (e.g short reads).
1349 * read any more data (e.g short reads).
1350 */ 1349 */
1351 if (!S_ISFIFO(inode->i_mode) && !S_ISSOCK(inode->i_mode)) 1350 } while (ret > 0 && iocb->ki_left > 0 &&
1352 ret = -EIOCBRETRY; 1351 !S_ISFIFO(inode->i_mode) && !S_ISSOCK(inode->i_mode));
1353 }
1354 1352
1355 /* This means we must have transferred all that we could */ 1353 /* This means we must have transferred all that we could */
1356 /* No need to retry anymore */ 1354 /* No need to retry anymore */
@@ -1360,27 +1358,21 @@ static ssize_t aio_pread(struct kiocb *iocb)
1360 return ret; 1358 return ret;
1361} 1359}
1362 1360
1363/* 1361/* see aio_pread() */
1364 * Default retry method for aio_write (also used for first time submit)
1365 * Responsible for updating iocb state as retries progress
1366 */
1367static ssize_t aio_pwrite(struct kiocb *iocb) 1362static ssize_t aio_pwrite(struct kiocb *iocb)
1368{ 1363{
1369 struct file *file = iocb->ki_filp; 1364 struct file *file = iocb->ki_filp;
1370 ssize_t ret = 0; 1365 ssize_t ret = 0;
1371 1366
1372 ret = file->f_op->aio_write(iocb, iocb->ki_buf, 1367 do {
1373 iocb->ki_left, iocb->ki_pos); 1368 ret = file->f_op->aio_write(iocb, iocb->ki_buf,
1374 1369 iocb->ki_left, iocb->ki_pos);
1375 if (ret > 0) { 1370 if (ret > 0) {
1376 iocb->ki_buf += ret; 1371 iocb->ki_buf += ret;
1377 iocb->ki_left -= ret; 1372 iocb->ki_left -= ret;
1378 1373 }
1379 ret = -EIOCBRETRY; 1374 } while (ret > 0 && iocb->ki_left > 0);
1380 }
1381 1375
1382 /* This means we must have transferred all that we could */
1383 /* No need to retry anymore */
1384 if ((ret == 0) || (iocb->ki_left == 0)) 1376 if ((ret == 0) || (iocb->ki_left == 0))
1385 ret = iocb->ki_nbytes - iocb->ki_left; 1377 ret = iocb->ki_nbytes - iocb->ki_left;
1386 1378
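
The aio hunks above drop the -EIOCBRETRY/kick round trip and simply loop in place, advancing ki_buf and ki_left whenever a call makes partial progress. A small user-space sketch of that short-transfer loop, using plain read(2) and a hypothetical read_full() helper (the pipe/socket early exit of the kernel loop is omitted here):

#include <errno.h>
#include <unistd.h>

/* Keep reading until 'count' bytes arrive, EOF, or a hard error: the same
 * "advance the buffer and remaining count on partial progress" shape as
 * the new aio_pread() loop. */
static ssize_t read_full(int fd, char *buf, size_t count)
{
	size_t left = count;

	while (left > 0) {
		ssize_t n = read(fd, buf, left);
		if (n < 0) {
			if (errno == EINTR)
				continue;       /* retry the syscall */
			return -1;              /* hard error */
		}
		if (n == 0)
			break;                  /* EOF: short read */
		buf  += n;
		left -= n;
	}
	return (ssize_t)(count - left);
}

int main(void)
{
	char buf[64];
	ssize_t n = read_full(0, buf, sizeof(buf));     /* up to 64 bytes from stdin */
	return n < 0;
}
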
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 8cc23e7d0d5d..1ebf7dafc1d7 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -781,6 +781,8 @@ static int cifs_oplock_thread(void * dummyarg)
781 781
782 oplockThread = current; 782 oplockThread = current;
783 do { 783 do {
784 if (try_to_freeze())
785 continue;
784 set_current_state(TASK_INTERRUPTIBLE); 786 set_current_state(TASK_INTERRUPTIBLE);
785 787
786 schedule_timeout(1*HZ); 788 schedule_timeout(1*HZ);
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 2335f14a1583..47360156cc54 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -344,6 +344,8 @@ cifs_demultiplex_thread(struct TCP_Server_Info *server)
344 } 344 }
345 345
346 while (server->tcpStatus != CifsExiting) { 346 while (server->tcpStatus != CifsExiting) {
347 if (try_to_freeze())
348 continue;
347 if (bigbuf == NULL) { 349 if (bigbuf == NULL) {
348 bigbuf = cifs_buf_get(); 350 bigbuf = cifs_buf_get();
349 if(bigbuf == NULL) { 351 if(bigbuf == NULL) {
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 403b90a1213d..4284cd31eba6 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -101,6 +101,10 @@
101/* Maximum number of poll wake up nests we are allowing */ 101/* Maximum number of poll wake up nests we are allowing */
102#define EP_MAX_POLLWAKE_NESTS 4 102#define EP_MAX_POLLWAKE_NESTS 4
103 103
104/* Maximum msec timeout value storeable in a long int */
105#define EP_MAX_MSTIMEO min(1000ULL * MAX_SCHEDULE_TIMEOUT / HZ, (LONG_MAX - 999ULL) / HZ)
106
107
104struct epoll_filefd { 108struct epoll_filefd {
105 struct file *file; 109 struct file *file;
106 int fd; 110 int fd;
@@ -1506,8 +1510,8 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
1506 * and the overflow condition. The passed timeout is in milliseconds, 1510 * and the overflow condition. The passed timeout is in milliseconds,
1507 * that why (t * HZ) / 1000. 1511 * that why (t * HZ) / 1000.
1508 */ 1512 */
1509 jtimeout = timeout == -1 || timeout > (MAX_SCHEDULE_TIMEOUT - 1000) / HZ ? 1513 jtimeout = (timeout < 0 || timeout >= EP_MAX_MSTIMEO) ?
1510 MAX_SCHEDULE_TIMEOUT: (timeout * HZ + 999) / 1000; 1514 MAX_SCHEDULE_TIMEOUT : (timeout * HZ + 999) / 1000;
1511 1515
1512retry: 1516retry:
1513 write_lock_irqsave(&ep->lock, flags); 1517 write_lock_irqsave(&ep->lock, flags);
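
EP_MAX_MSTIMEO exists so that the later `timeout * HZ + 999` multiplication cannot overflow a long. A stand-alone sketch of the clamp-then-convert computation follows; HZ is a stand-in value here, and the kernel macro additionally caps at the scheduler's own maximum timeout:

#include <limits.h>
#include <stdio.h>

#define HZ 250L         /* stand-in tick rate; the kernel value is config-dependent */

/* Largest millisecond timeout for which (ms * HZ + 999) still fits in a long. */
#define MAX_MSTIMEO ((LONG_MAX - 999L) / HZ)

static long ms_to_jiffies(long timeout_ms)
{
	if (timeout_ms < 0 || timeout_ms >= MAX_MSTIMEO)
		return LONG_MAX;                        /* "wait forever" */
	return (timeout_ms * HZ + 999) / 1000;          /* round up to whole jiffies */
}

int main(void)
{
	printf("10 ms -> %ld jiffies\n", ms_to_jiffies(10));
	printf("-1    -> %ld (forever)\n", ms_to_jiffies(-1));
	return 0;
}
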
diff --git a/fs/ext2/ialloc.c b/fs/ext2/ialloc.c
index c8d07030c897..e2d6208633a7 100644
--- a/fs/ext2/ialloc.c
+++ b/fs/ext2/ialloc.c
@@ -605,27 +605,28 @@ got:
605 insert_inode_hash(inode); 605 insert_inode_hash(inode);
606 606
607 if (DQUOT_ALLOC_INODE(inode)) { 607 if (DQUOT_ALLOC_INODE(inode)) {
608 DQUOT_DROP(inode);
609 err = -ENOSPC; 608 err = -ENOSPC;
610 goto fail2; 609 goto fail_drop;
611 } 610 }
611
612 err = ext2_init_acl(inode, dir); 612 err = ext2_init_acl(inode, dir);
613 if (err) { 613 if (err)
614 DQUOT_FREE_INODE(inode); 614 goto fail_free_drop;
615 DQUOT_DROP(inode); 615
616 goto fail2;
617 }
618 err = ext2_init_security(inode,dir); 616 err = ext2_init_security(inode,dir);
619 if (err) { 617 if (err)
620 DQUOT_FREE_INODE(inode); 618 goto fail_free_drop;
621 goto fail2; 619
622 }
623 mark_inode_dirty(inode); 620 mark_inode_dirty(inode);
624 ext2_debug("allocating inode %lu\n", inode->i_ino); 621 ext2_debug("allocating inode %lu\n", inode->i_ino);
625 ext2_preread_inode(inode); 622 ext2_preread_inode(inode);
626 return inode; 623 return inode;
627 624
628fail2: 625fail_free_drop:
626 DQUOT_FREE_INODE(inode);
627
628fail_drop:
629 DQUOT_DROP(inode);
629 inode->i_flags |= S_NOQUOTA; 630 inode->i_flags |= S_NOQUOTA;
630 inode->i_nlink = 0; 631 inode->i_nlink = 0;
631 iput(inode); 632 iput(inode);
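
The ialloc hunk above replaces the repeated DQUOT_FREE_INODE/DQUOT_DROP pairs with two stacked labels, fail_free_drop falling through into fail_drop, so each failure point jumps to exactly the cleanup it still owes. A generic sketch of that stacked-cleanup idiom, with made-up resource names:

#include <stdlib.h>

struct thing { int *quota; int *acl; };

static int thing_setup(struct thing *t)
{
	t->quota = malloc(sizeof(*t->quota));
	if (!t->quota)
		return -1;                      /* nothing to undo yet */

	t->acl = malloc(sizeof(*t->acl));
	if (!t->acl)
		goto fail_drop_quota;           /* only the quota needs undoing */

	if (/* some later initialization step fails */ 0)
		goto fail_free_acl;             /* undo both, in reverse order */

	return 0;

fail_free_acl:
	free(t->acl);
fail_drop_quota:                                /* fall-through: later labels undo less */
	free(t->quota);
	return -1;
}

int main(void)
{
	struct thing t;
	if (thing_setup(&t) != 0)
		return 1;
	free(t.acl);
	free(t.quota);
	return 0;
}
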
diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
index e463dca008e4..0213db4911a2 100644
--- a/fs/ext3/balloc.c
+++ b/fs/ext3/balloc.c
@@ -1410,7 +1410,7 @@ unsigned long ext3_count_free_blocks(struct super_block *sb)
1410 unsigned long desc_count; 1410 unsigned long desc_count;
1411 struct ext3_group_desc *gdp; 1411 struct ext3_group_desc *gdp;
1412 int i; 1412 int i;
1413 unsigned long ngroups; 1413 unsigned long ngroups = EXT3_SB(sb)->s_groups_count;
1414#ifdef EXT3FS_DEBUG 1414#ifdef EXT3FS_DEBUG
1415 struct ext3_super_block *es; 1415 struct ext3_super_block *es;
1416 unsigned long bitmap_count, x; 1416 unsigned long bitmap_count, x;
@@ -1421,7 +1421,8 @@ unsigned long ext3_count_free_blocks(struct super_block *sb)
1421 desc_count = 0; 1421 desc_count = 0;
1422 bitmap_count = 0; 1422 bitmap_count = 0;
1423 gdp = NULL; 1423 gdp = NULL;
1424 for (i = 0; i < EXT3_SB(sb)->s_groups_count; i++) { 1424
1425 for (i = 0; i < ngroups; i++) {
1425 gdp = ext3_get_group_desc(sb, i, NULL); 1426 gdp = ext3_get_group_desc(sb, i, NULL);
1426 if (!gdp) 1427 if (!gdp)
1427 continue; 1428 continue;
@@ -1443,7 +1444,6 @@ unsigned long ext3_count_free_blocks(struct super_block *sb)
1443 return bitmap_count; 1444 return bitmap_count;
1444#else 1445#else
1445 desc_count = 0; 1446 desc_count = 0;
1446 ngroups = EXT3_SB(sb)->s_groups_count;
1447 smp_rmb(); 1447 smp_rmb();
1448 for (i = 0; i < ngroups; i++) { 1448 for (i = 0; i < ngroups; i++) {
1449 gdp = ext3_get_group_desc(sb, i, NULL); 1449 gdp = ext3_get_group_desc(sb, i, NULL);
diff --git a/fs/ext3/ialloc.c b/fs/ext3/ialloc.c
index 96552769d039..6549945f9ac1 100644
--- a/fs/ext3/ialloc.c
+++ b/fs/ext3/ialloc.c
@@ -597,27 +597,22 @@ got:
597 597
598 ret = inode; 598 ret = inode;
599 if(DQUOT_ALLOC_INODE(inode)) { 599 if(DQUOT_ALLOC_INODE(inode)) {
600 DQUOT_DROP(inode);
601 err = -EDQUOT; 600 err = -EDQUOT;
602 goto fail2; 601 goto fail_drop;
603 } 602 }
603
604 err = ext3_init_acl(handle, inode, dir); 604 err = ext3_init_acl(handle, inode, dir);
605 if (err) { 605 if (err)
606 DQUOT_FREE_INODE(inode); 606 goto fail_free_drop;
607 DQUOT_DROP(inode); 607
608 goto fail2;
609 }
610 err = ext3_init_security(handle,inode, dir); 608 err = ext3_init_security(handle,inode, dir);
611 if (err) { 609 if (err)
612 DQUOT_FREE_INODE(inode); 610 goto fail_free_drop;
613 goto fail2; 611
614 }
615 err = ext3_mark_inode_dirty(handle, inode); 612 err = ext3_mark_inode_dirty(handle, inode);
616 if (err) { 613 if (err) {
617 ext3_std_error(sb, err); 614 ext3_std_error(sb, err);
618 DQUOT_FREE_INODE(inode); 615 goto fail_free_drop;
619 DQUOT_DROP(inode);
620 goto fail2;
621 } 616 }
622 617
623 ext3_debug("allocating inode %lu\n", inode->i_ino); 618 ext3_debug("allocating inode %lu\n", inode->i_ino);
@@ -631,7 +626,11 @@ really_out:
631 brelse(bitmap_bh); 626 brelse(bitmap_bh);
632 return ret; 627 return ret;
633 628
634fail2: 629fail_free_drop:
630 DQUOT_FREE_INODE(inode);
631
632fail_drop:
633 DQUOT_DROP(inode);
635 inode->i_flags |= S_NOQUOTA; 634 inode->i_flags |= S_NOQUOTA;
636 inode->i_nlink = 0; 635 inode->i_nlink = 0;
637 iput(inode); 636 iput(inode);
diff --git a/fs/ext3/resize.c b/fs/ext3/resize.c
index 2c9f81278d5d..57f79106267d 100644
--- a/fs/ext3/resize.c
+++ b/fs/ext3/resize.c
@@ -242,7 +242,7 @@ static int setup_new_group_blocks(struct super_block *sb,
242 i < sbi->s_itb_per_group; i++, bit++, block++) { 242 i < sbi->s_itb_per_group; i++, bit++, block++) {
243 struct buffer_head *it; 243 struct buffer_head *it;
244 244
245 ext3_debug("clear inode block %#04x (+%ld)\n", block, bit); 245 ext3_debug("clear inode block %#04lx (+%d)\n", block, bit);
246 if (IS_ERR(it = bclean(handle, sb, block))) { 246 if (IS_ERR(it = bclean(handle, sb, block))) {
247 err = PTR_ERR(it); 247 err = PTR_ERR(it);
248 goto exit_bh; 248 goto exit_bh;
@@ -643,8 +643,8 @@ static void update_backups(struct super_block *sb,
643 break; 643 break;
644 644
645 bh = sb_getblk(sb, group * bpg + blk_off); 645 bh = sb_getblk(sb, group * bpg + blk_off);
646 ext3_debug(sb, __FUNCTION__, "update metadata backup %#04lx\n", 646 ext3_debug("update metadata backup %#04lx\n",
647 bh->b_blocknr); 647 (unsigned long)bh->b_blocknr);
648 if ((err = ext3_journal_get_write_access(handle, bh))) 648 if ((err = ext3_journal_get_write_access(handle, bh)))
649 break; 649 break;
650 lock_buffer(bh); 650 lock_buffer(bh);
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index a93c3609025d..9e24ceb019fe 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -512,15 +512,14 @@ static void ext3_clear_inode(struct inode *inode)
512 512
513static int ext3_show_options(struct seq_file *seq, struct vfsmount *vfs) 513static int ext3_show_options(struct seq_file *seq, struct vfsmount *vfs)
514{ 514{
515 struct ext3_sb_info *sbi = EXT3_SB(vfs->mnt_sb); 515 struct super_block *sb = vfs->mnt_sb;
516 struct ext3_sb_info *sbi = EXT3_SB(sb);
516 517
517 if (sbi->s_mount_opt & EXT3_MOUNT_JOURNAL_DATA) 518 if (test_opt(sb, DATA_FLAGS) == EXT3_MOUNT_JOURNAL_DATA)
518 seq_puts(seq, ",data=journal"); 519 seq_puts(seq, ",data=journal");
519 520 else if (test_opt(sb, DATA_FLAGS) == EXT3_MOUNT_ORDERED_DATA)
520 if (sbi->s_mount_opt & EXT3_MOUNT_ORDERED_DATA)
521 seq_puts(seq, ",data=ordered"); 521 seq_puts(seq, ",data=ordered");
522 522 else if (test_opt(sb, DATA_FLAGS) == EXT3_MOUNT_WRITEBACK_DATA)
523 if (sbi->s_mount_opt & EXT3_MOUNT_WRITEBACK_DATA)
524 seq_puts(seq, ",data=writeback"); 523 seq_puts(seq, ",data=writeback");
525 524
526#if defined(CONFIG_QUOTA) 525#if defined(CONFIG_QUOTA)
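
The super.c hunk switches to an if/else-if chain over test_opt(sb, DATA_FLAGS) because the three data= journaling modes are values of one multi-bit field, and the writeback value is the OR of the other two; testing each bit independently, as the old code did, can therefore report several modes at once. A small sketch of decoding such a field; the flag values below only illustrate the layout:

#include <stdio.h>

#define MOUNT_JOURNAL_DATA   0x0400     /* illustrative values; the real ones   */
#define MOUNT_ORDERED_DATA   0x0800     /* live in ext3_fs.h                    */
#define MOUNT_WRITEBACK_DATA 0x0C00
#define MOUNT_DATA_FLAGS     0x0C00     /* mask covering all three modes */

static const char *data_mode(unsigned int mount_opt)
{
	switch (mount_opt & MOUNT_DATA_FLAGS) { /* exactly one mode matches */
	case MOUNT_JOURNAL_DATA:   return "data=journal";
	case MOUNT_ORDERED_DATA:   return "data=ordered";
	case MOUNT_WRITEBACK_DATA: return "data=writeback";
	default:                   return "data=?";
	}
}

int main(void)
{
	printf("%s\n", data_mode(MOUNT_ORDERED_DATA | 0x1 /* other options */));
	return 0;
}
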
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index e79e49b3eec7..29f1e9f6e85c 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -96,6 +96,8 @@ static int fuse_lookup_iget(struct inode *dir, struct dentry *entry,
96 fuse_lookup_init(req, dir, entry, &outarg); 96 fuse_lookup_init(req, dir, entry, &outarg);
97 request_send(fc, req); 97 request_send(fc, req);
98 err = req->out.h.error; 98 err = req->out.h.error;
99 if (!err && (!outarg.nodeid || outarg.nodeid == FUSE_ROOT_ID))
100 err = -EIO;
99 if (!err) { 101 if (!err) {
100 inode = fuse_iget(dir->i_sb, outarg.nodeid, outarg.generation, 102 inode = fuse_iget(dir->i_sb, outarg.nodeid, outarg.generation,
101 &outarg.attr); 103 &outarg.attr);
@@ -152,6 +154,10 @@ static int create_new_entry(struct fuse_conn *fc, struct fuse_req *req,
152 fuse_put_request(fc, req); 154 fuse_put_request(fc, req);
153 return err; 155 return err;
154 } 156 }
157 if (!outarg.nodeid || outarg.nodeid == FUSE_ROOT_ID) {
158 fuse_put_request(fc, req);
159 return -EIO;
160 }
155 inode = fuse_iget(dir->i_sb, outarg.nodeid, outarg.generation, 161 inode = fuse_iget(dir->i_sb, outarg.nodeid, outarg.generation,
156 &outarg.attr); 162 &outarg.attr);
157 if (!inode) { 163 if (!inode) {
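
Both fuse hunks reject a reply whose nodeid is zero or FUSE_ROOT_ID before an inode is instantiated, since the userspace server is untrusted and a bogus id would poison the inode cache. A tiny sketch of that validate-the-handle-first check, with a stand-in ROOT_ID constant:

#include <errno.h>

#define ROOT_ID 1ULL    /* stand-in for FUSE_ROOT_ID */

/* Reject ids an untrusted server must never hand back for a new entry. */
static int check_reply_nodeid(unsigned long long nodeid)
{
	if (nodeid == 0 || nodeid == ROOT_ID)
		return -EIO;    /* protocol violation: treat as I/O error */
	return 0;
}

int main(void)
{
	return check_reply_nodeid(0) == -EIO ? 0 : 1;   /* nodeid 0 must be rejected */
}
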
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 6454022b0536..657ab11c173b 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -23,6 +23,10 @@ int fuse_open_common(struct inode *inode, struct file *file, int isdir)
23 struct fuse_file *ff; 23 struct fuse_file *ff;
24 int err; 24 int err;
25 25
26 /* VFS checks this, but only _after_ ->open() */
27 if (file->f_flags & O_DIRECT)
28 return -EINVAL;
29
26 err = generic_file_open(inode, file); 30 err = generic_file_open(inode, file);
27 if (err) 31 if (err)
28 return err; 32 return err;
diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
index 59c5062cd63f..dd7113106269 100644
--- a/fs/hostfs/hostfs_kern.c
+++ b/fs/hostfs/hostfs_kern.c
@@ -793,11 +793,6 @@ int hostfs_rename(struct inode *from_ino, struct dentry *from,
793 return(err); 793 return(err);
794} 794}
795 795
796void hostfs_truncate(struct inode *ino)
797{
798 not_implemented();
799}
800
801int hostfs_permission(struct inode *ino, int desired, struct nameidata *nd) 796int hostfs_permission(struct inode *ino, int desired, struct nameidata *nd)
802{ 797{
803 char *name; 798 char *name;
@@ -894,7 +889,6 @@ static struct inode_operations hostfs_iops = {
894 .rmdir = hostfs_rmdir, 889 .rmdir = hostfs_rmdir,
895 .mknod = hostfs_mknod, 890 .mknod = hostfs_mknod,
896 .rename = hostfs_rename, 891 .rename = hostfs_rename,
897 .truncate = hostfs_truncate,
898 .permission = hostfs_permission, 892 .permission = hostfs_permission,
899 .setattr = hostfs_setattr, 893 .setattr = hostfs_setattr,
900 .getattr = hostfs_getattr, 894 .getattr = hostfs_getattr,
@@ -910,7 +904,6 @@ static struct inode_operations hostfs_dir_iops = {
910 .rmdir = hostfs_rmdir, 904 .rmdir = hostfs_rmdir,
911 .mknod = hostfs_mknod, 905 .mknod = hostfs_mknod,
912 .rename = hostfs_rename, 906 .rename = hostfs_rename,
913 .truncate = hostfs_truncate,
914 .permission = hostfs_permission, 907 .permission = hostfs_permission,
915 .setattr = hostfs_setattr, 908 .setattr = hostfs_setattr,
916 .getattr = hostfs_getattr, 909 .getattr = hostfs_getattr,
diff --git a/fs/jfs/inode.c b/fs/jfs/inode.c
index 0ec62d5310db..9f942ca8e4e3 100644
--- a/fs/jfs/inode.c
+++ b/fs/jfs/inode.c
@@ -129,8 +129,7 @@ void jfs_delete_inode(struct inode *inode)
129 jfs_info("In jfs_delete_inode, inode = 0x%p", inode); 129 jfs_info("In jfs_delete_inode, inode = 0x%p", inode);
130 130
131 if (!is_bad_inode(inode) && 131 if (!is_bad_inode(inode) &&
132 (JFS_IP(inode)->fileset == cpu_to_le32(FILESYSTEM_I))) { 132 (JFS_IP(inode)->fileset == FILESYSTEM_I)) {
133
134 truncate_inode_pages(&inode->i_data, 0); 133 truncate_inode_pages(&inode->i_data, 0);
135 134
136 if (test_cflag(COMMIT_Freewmap, inode)) 135 if (test_cflag(COMMIT_Freewmap, inode))
diff --git a/fs/jfs/jfs_dmap.c b/fs/jfs/jfs_dmap.c
index c739626f5bf1..eadf319bee22 100644
--- a/fs/jfs/jfs_dmap.c
+++ b/fs/jfs/jfs_dmap.c
@@ -3055,7 +3055,7 @@ static int cntlz(u32 value)
3055 * RETURN VALUES: 3055 * RETURN VALUES:
3056 * log2 number of blocks 3056 * log2 number of blocks
3057 */ 3057 */
3058int blkstol2(s64 nb) 3058static int blkstol2(s64 nb)
3059{ 3059{
3060 int l2nb; 3060 int l2nb;
3061 s64 mask; /* meant to be signed */ 3061 s64 mask; /* meant to be signed */
diff --git a/fs/jfs/jfs_txnmgr.c b/fs/jfs/jfs_txnmgr.c
index c7a92f9deb2b..9b71ed2674fe 100644
--- a/fs/jfs/jfs_txnmgr.c
+++ b/fs/jfs/jfs_txnmgr.c
@@ -725,6 +725,9 @@ struct tlock *txLock(tid_t tid, struct inode *ip, struct metapage * mp,
725 else 725 else
726 tlck->flag = tlckINODELOCK; 726 tlck->flag = tlckINODELOCK;
727 727
728 if (S_ISDIR(ip->i_mode))
729 tlck->flag |= tlckDIRECTORY;
730
728 tlck->type = 0; 731 tlck->type = 0;
729 732
730 /* bind the tlock and the page */ 733 /* bind the tlock and the page */
@@ -1009,6 +1012,8 @@ struct tlock *txMaplock(tid_t tid, struct inode *ip, int type)
1009 1012
1010 /* bind the tlock and the object */ 1013 /* bind the tlock and the object */
1011 tlck->flag = tlckINODELOCK; 1014 tlck->flag = tlckINODELOCK;
1015 if (S_ISDIR(ip->i_mode))
1016 tlck->flag |= tlckDIRECTORY;
1012 tlck->ip = ip; 1017 tlck->ip = ip;
1013 tlck->mp = NULL; 1018 tlck->mp = NULL;
1014 1019
@@ -1077,6 +1082,8 @@ struct linelock *txLinelock(struct linelock * tlock)
1077 linelock->flag = tlckLINELOCK; 1082 linelock->flag = tlckLINELOCK;
1078 linelock->maxcnt = TLOCKLONG; 1083 linelock->maxcnt = TLOCKLONG;
1079 linelock->index = 0; 1084 linelock->index = 0;
1085 if (tlck->flag & tlckDIRECTORY)
1086 linelock->flag |= tlckDIRECTORY;
1080 1087
1081 /* append linelock after tlock */ 1088 /* append linelock after tlock */
1082 linelock->next = tlock->next; 1089 linelock->next = tlock->next;
@@ -2070,8 +2077,8 @@ static void xtLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
2070 * 2077 *
2071 * function: log from maplock of freed data extents; 2078 * function: log from maplock of freed data extents;
2072 */ 2079 */
2073void mapLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd, 2080static void mapLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
2074 struct tlock * tlck) 2081 struct tlock * tlck)
2075{ 2082{
2076 struct pxd_lock *pxdlock; 2083 struct pxd_lock *pxdlock;
2077 int i, nlock; 2084 int i, nlock;
@@ -2209,7 +2216,7 @@ void txEA(tid_t tid, struct inode *ip, dxd_t * oldea, dxd_t * newea)
2209 * function: synchronously write pages locked by transaction 2216 * function: synchronously write pages locked by transaction
2210 * after txLog() but before txUpdateMap(); 2217 * after txLog() but before txUpdateMap();
2211 */ 2218 */
2212void txForce(struct tblock * tblk) 2219static void txForce(struct tblock * tblk)
2213{ 2220{
2214 struct tlock *tlck; 2221 struct tlock *tlck;
2215 lid_t lid, next; 2222 lid_t lid, next;
@@ -2358,7 +2365,7 @@ static void txUpdateMap(struct tblock * tblk)
2358 */ 2365 */
2359 else { /* (maplock->flag & mlckFREE) */ 2366 else { /* (maplock->flag & mlckFREE) */
2360 2367
2361 if (S_ISDIR(tlck->ip->i_mode)) 2368 if (tlck->flag & tlckDIRECTORY)
2362 txFreeMap(ipimap, maplock, 2369 txFreeMap(ipimap, maplock,
2363 tblk, COMMIT_PWMAP); 2370 tblk, COMMIT_PWMAP);
2364 else 2371 else
diff --git a/fs/jfs/jfs_txnmgr.h b/fs/jfs/jfs_txnmgr.h
index 59ad0f6b7231..0e4dc4514c47 100644
--- a/fs/jfs/jfs_txnmgr.h
+++ b/fs/jfs/jfs_txnmgr.h
@@ -122,6 +122,7 @@ extern struct tlock *TxLock; /* transaction lock table */
122#define tlckLOG 0x0800 122#define tlckLOG 0x0800
123/* updateMap state */ 123/* updateMap state */
124#define tlckUPDATEMAP 0x0080 124#define tlckUPDATEMAP 0x0080
125#define tlckDIRECTORY 0x0040
125/* freeLock state */ 126/* freeLock state */
126#define tlckFREELOCK 0x0008 127#define tlckFREELOCK 0x0008
127#define tlckWRITEPAGE 0x0004 128#define tlckWRITEPAGE 0x0004
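
The jfs hunks record at lock time whether the locked object is a directory (the new tlckDIRECTORY bit) and later test that flag instead of dereferencing tlck->ip, which may no longer be valid at commit time. A generic sketch of snapshotting the property into the flag rather than chasing the pointer later; the types below are invented for illustration:

#include <stdbool.h>

#define LOCK_DIRECTORY 0x0040   /* mirrors the new tlckDIRECTORY bit */

struct object { bool is_dir; /* ... */ };
struct lock   { unsigned int flags; struct object *obj; };

static void lock_init(struct lock *lck, struct object *obj)
{
	lck->flags = 0;
	lck->obj = obj;
	if (obj->is_dir)
		lck->flags |= LOCK_DIRECTORY;   /* capture it while obj is valid */
}

/* Called much later; obj may have been freed, so consult the flag only. */
static bool lock_covers_directory(const struct lock *lck)
{
	return (lck->flags & LOCK_DIRECTORY) != 0;
}

int main(void)
{
	struct object dir = { .is_dir = true };
	struct lock lck;

	lock_init(&lck, &dir);
	return lock_covers_directory(&lck) ? 0 : 1;
}
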
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index 6ceb1d471f20..9758ebd49905 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -184,14 +184,13 @@ static void nfs_readpage_release(struct nfs_page *req)
184{ 184{
185 unlock_page(req->wb_page); 185 unlock_page(req->wb_page);
186 186
187 nfs_clear_request(req);
188 nfs_release_request(req);
189
190 dprintk("NFS: read done (%s/%Ld %d@%Ld)\n", 187 dprintk("NFS: read done (%s/%Ld %d@%Ld)\n",
191 req->wb_context->dentry->d_inode->i_sb->s_id, 188 req->wb_context->dentry->d_inode->i_sb->s_id,
192 (long long)NFS_FILEID(req->wb_context->dentry->d_inode), 189 (long long)NFS_FILEID(req->wb_context->dentry->d_inode),
193 req->wb_bytes, 190 req->wb_bytes,
194 (long long)req_offset(req)); 191 (long long)req_offset(req));
192 nfs_clear_request(req);
193 nfs_release_request(req);
195} 194}
196 195
197/* 196/*
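
The read.c hunk moves the dprintk ahead of nfs_clear_request()/nfs_release_request() because the message dereferences req, and the old order logged a request that had already been released. A tiny sketch of the same log-then-free ordering rule:

#include <stdio.h>
#include <stdlib.h>

struct request { int id; size_t bytes; };

static void request_release(struct request *req)
{
	/* Use req for diagnostics first... */
	printf("request %d done (%zu bytes)\n", req->id, req->bytes);
	/* ...and only then give it back; the reverse order reads freed memory. */
	free(req);
}

int main(void)
{
	struct request *req = malloc(sizeof(*req));
	if (!req)
		return 1;
	req->id = 1;
	req->bytes = 4096;
	request_release(req);
	return 0;
}
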
diff --git a/fs/ntfs/ChangeLog b/fs/ntfs/ChangeLog
index c7e9237379c2..83f3322765cd 100644
--- a/fs/ntfs/ChangeLog
+++ b/fs/ntfs/ChangeLog
@@ -29,7 +29,8 @@ ToDo/Notes:
29 The Windows boot will run chkdsk and then reboot. The user can then 29 The Windows boot will run chkdsk and then reboot. The user can then
30 immediately boot into Linux rather than having to do a full Windows 30 immediately boot into Linux rather than having to do a full Windows
31 boot first before rebooting into Linux and we will recognize such a 31 boot first before rebooting into Linux and we will recognize such a
32 journal and empty it as it is clean by definition. 32 journal and empty it as it is clean by definition. Note, this only
33 works if chkdsk left the journal in an obviously clean state.
33 - Support journals ($LogFile) with only one restart page as well as 34 - Support journals ($LogFile) with only one restart page as well as
34 journals with two different restart pages. We sanity check both and 35 journals with two different restart pages. We sanity check both and
35 either use the only sane one or the more recent one of the two in the 36 either use the only sane one or the more recent one of the two in the
@@ -94,6 +95,13 @@ ToDo/Notes:
94 my ways. 95 my ways.
95 - Fix various bugs in the runlist merging code. (Based on libntfs 96 - Fix various bugs in the runlist merging code. (Based on libntfs
96 changes by Richard Russon.) 97 changes by Richard Russon.)
98 - Fix sparse warnings that have crept in over time.
99 - Change ntfs_cluster_free() to require a write locked runlist on entry
100 since we otherwise get into a lock reversal deadlock if a read locked
101 runlist is passed in. In the process also change it to take an ntfs
102 inode instead of a vfs inode as parameter.
103 - Fix the definition of the CHKD ntfs record magic. It had an off by
104 two error causing it to be CHKB instead of CHKD.
97 105
982.1.23 - Implement extension of resident files and make writing safe as well as 1062.1.23 - Implement extension of resident files and make writing safe as well as
99 many bug fixes, cleanups, and enhancements... 107 many bug fixes, cleanups, and enhancements...
diff --git a/fs/ntfs/layout.h b/fs/ntfs/layout.h
index 609ad1728ce4..01f2dfa39cec 100644
--- a/fs/ntfs/layout.h
+++ b/fs/ntfs/layout.h
@@ -123,7 +123,7 @@ enum {
123 magic_RCRD = const_cpu_to_le32(0x44524352), /* Log record page. */ 123 magic_RCRD = const_cpu_to_le32(0x44524352), /* Log record page. */
124 124
125 /* Found in $LogFile/$DATA. (May be found in $MFT/$DATA, also?) */ 125 /* Found in $LogFile/$DATA. (May be found in $MFT/$DATA, also?) */
126 magic_CHKD = const_cpu_to_le32(0x424b4843), /* Modified by chkdsk. */ 126 magic_CHKD = const_cpu_to_le32(0x444b4843), /* Modified by chkdsk. */
127 127
128 /* Found in all ntfs record containing records. */ 128 /* Found in all ntfs record containing records. */
129 magic_BAAD = const_cpu_to_le32(0x44414142), /* Failed multi sector 129 magic_BAAD = const_cpu_to_le32(0x44414142), /* Failed multi sector
@@ -308,10 +308,8 @@ typedef le16 MFT_RECORD_FLAGS;
308 * The _LE versions are to be applied on little endian MFT_REFs. 308 * The _LE versions are to be applied on little endian MFT_REFs.
309 * Note: The _LE versions will return a CPU endian formatted value! 309 * Note: The _LE versions will return a CPU endian formatted value!
310 */ 310 */
311typedef enum { 311#define MFT_REF_MASK_CPU 0x0000ffffffffffffULL
312 MFT_REF_MASK_CPU = 0x0000ffffffffffffULL, 312#define MFT_REF_MASK_LE const_cpu_to_le64(0x0000ffffffffffffULL)
313 MFT_REF_MASK_LE = const_cpu_to_le64(0x0000ffffffffffffULL),
314} MFT_REF_CONSTS;
315 313
316typedef u64 MFT_REF; 314typedef u64 MFT_REF;
317typedef le64 leMFT_REF; 315typedef le64 leMFT_REF;
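
The layout.h hunk fixes the CHKD record magic: 0x424b4843 decodes, as little-endian ASCII, to 'C','H','K','B', two letters short of the intended 'C','H','K','D' = 0x444b4843. A stand-alone sketch of how such on-disk magics are assembled and what the old constant actually spelled:

#include <stdint.h>
#include <stdio.h>

/* Build a little-endian 4-byte magic from its ASCII characters. */
#define MAGIC(a, b, c, d) \
	((uint32_t)(a) | ((uint32_t)(b) << 8) | ((uint32_t)(c) << 16) | ((uint32_t)(d) << 24))

#define MAGIC_CHKD MAGIC('C', 'H', 'K', 'D')    /* 0x444b4843 */

int main(void)
{
	printf("CHKD magic = 0x%08x\n", MAGIC_CHKD);            /* prints 0x444b4843 */
	printf("old value  = 0x%08x ('%c%c%c%c')\n",            /* the off-by-two bug */
	       0x424b4843u, 0x43, 0x48, 0x4b, 0x42);
	return 0;
}
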
diff --git a/fs/ntfs/lcnalloc.c b/fs/ntfs/lcnalloc.c
index 7b5934290685..5af3bf0b7eee 100644
--- a/fs/ntfs/lcnalloc.c
+++ b/fs/ntfs/lcnalloc.c
@@ -779,14 +779,13 @@ out:
779 779
780/** 780/**
781 * __ntfs_cluster_free - free clusters on an ntfs volume 781 * __ntfs_cluster_free - free clusters on an ntfs volume
782 * @vi: vfs inode whose runlist describes the clusters to free 782 * @ni: ntfs inode whose runlist describes the clusters to free
783 * @start_vcn: vcn in the runlist of @vi at which to start freeing clusters 783 * @start_vcn: vcn in the runlist of @ni at which to start freeing clusters
784 * @count: number of clusters to free or -1 for all clusters 784 * @count: number of clusters to free or -1 for all clusters
785 * @write_locked: true if the runlist is locked for writing
786 * @is_rollback: true if this is a rollback operation 785 * @is_rollback: true if this is a rollback operation
787 * 786 *
788 * Free @count clusters starting at the cluster @start_vcn in the runlist 787 * Free @count clusters starting at the cluster @start_vcn in the runlist
789 * described by the vfs inode @vi. 788 * described by the ntfs inode @ni.
790 * 789 *
791 * If @count is -1, all clusters from @start_vcn to the end of the runlist are 790 * If @count is -1, all clusters from @start_vcn to the end of the runlist are
792 * deallocated. Thus, to completely free all clusters in a runlist, use 791 * deallocated. Thus, to completely free all clusters in a runlist, use
@@ -801,31 +800,28 @@ out:
801 * Return the number of deallocated clusters (not counting sparse ones) on 800 * Return the number of deallocated clusters (not counting sparse ones) on
802 * success and -errno on error. 801 * success and -errno on error.
803 * 802 *
804 * Locking: - The runlist described by @vi must be locked on entry and is 803 * Locking: - The runlist described by @ni must be locked for writing on entry
805 * locked on return. Note if the runlist is locked for reading the 804 * and is locked on return. Note the runlist may be modified when
806 * lock may be dropped and reacquired. Note the runlist may be 805 * needed runlist fragments need to be mapped.
807 * modified when needed runlist fragments need to be mapped.
808 * - The volume lcn bitmap must be unlocked on entry and is unlocked 806 * - The volume lcn bitmap must be unlocked on entry and is unlocked
809 * on return. 807 * on return.
810 * - This function takes the volume lcn bitmap lock for writing and 808 * - This function takes the volume lcn bitmap lock for writing and
811 * modifies the bitmap contents. 809 * modifies the bitmap contents.
812 */ 810 */
813s64 __ntfs_cluster_free(struct inode *vi, const VCN start_vcn, s64 count, 811s64 __ntfs_cluster_free(ntfs_inode *ni, const VCN start_vcn, s64 count,
814 const BOOL write_locked, const BOOL is_rollback) 812 const BOOL is_rollback)
815{ 813{
816 s64 delta, to_free, total_freed, real_freed; 814 s64 delta, to_free, total_freed, real_freed;
817 ntfs_inode *ni;
818 ntfs_volume *vol; 815 ntfs_volume *vol;
819 struct inode *lcnbmp_vi; 816 struct inode *lcnbmp_vi;
820 runlist_element *rl; 817 runlist_element *rl;
821 int err; 818 int err;
822 819
823 BUG_ON(!vi); 820 BUG_ON(!ni);
824 ntfs_debug("Entering for i_ino 0x%lx, start_vcn 0x%llx, count " 821 ntfs_debug("Entering for i_ino 0x%lx, start_vcn 0x%llx, count "
825 "0x%llx.%s", vi->i_ino, (unsigned long long)start_vcn, 822 "0x%llx.%s", ni->mft_no, (unsigned long long)start_vcn,
826 (unsigned long long)count, 823 (unsigned long long)count,
827 is_rollback ? " (rollback)" : ""); 824 is_rollback ? " (rollback)" : "");
828 ni = NTFS_I(vi);
829 vol = ni->vol; 825 vol = ni->vol;
830 lcnbmp_vi = vol->lcnbmp_ino; 826 lcnbmp_vi = vol->lcnbmp_ino;
831 BUG_ON(!lcnbmp_vi); 827 BUG_ON(!lcnbmp_vi);
@@ -843,7 +839,7 @@ s64 __ntfs_cluster_free(struct inode *vi, const VCN start_vcn, s64 count,
843 839
844 total_freed = real_freed = 0; 840 total_freed = real_freed = 0;
845 841
846 rl = ntfs_attr_find_vcn_nolock(ni, start_vcn, write_locked); 842 rl = ntfs_attr_find_vcn_nolock(ni, start_vcn, TRUE);
847 if (IS_ERR(rl)) { 843 if (IS_ERR(rl)) {
848 if (!is_rollback) 844 if (!is_rollback)
849 ntfs_error(vol->sb, "Failed to find first runlist " 845 ntfs_error(vol->sb, "Failed to find first runlist "
@@ -897,7 +893,7 @@ s64 __ntfs_cluster_free(struct inode *vi, const VCN start_vcn, s64 count,
897 893
898 /* Attempt to map runlist. */ 894 /* Attempt to map runlist. */
899 vcn = rl->vcn; 895 vcn = rl->vcn;
900 rl = ntfs_attr_find_vcn_nolock(ni, vcn, write_locked); 896 rl = ntfs_attr_find_vcn_nolock(ni, vcn, TRUE);
901 if (IS_ERR(rl)) { 897 if (IS_ERR(rl)) {
902 err = PTR_ERR(rl); 898 err = PTR_ERR(rl);
903 if (!is_rollback) 899 if (!is_rollback)
@@ -965,8 +961,7 @@ err_out:
965 * If rollback fails, set the volume errors flag, emit an error 961 * If rollback fails, set the volume errors flag, emit an error
966 * message, and return the error code. 962 * message, and return the error code.
967 */ 963 */
968 delta = __ntfs_cluster_free(vi, start_vcn, total_freed, write_locked, 964 delta = __ntfs_cluster_free(ni, start_vcn, total_freed, TRUE);
969 TRUE);
970 if (delta < 0) { 965 if (delta < 0) {
971 ntfs_error(vol->sb, "Failed to rollback (error %i). Leaving " 966 ntfs_error(vol->sb, "Failed to rollback (error %i). Leaving "
972 "inconsistent metadata! Unmount and run " 967 "inconsistent metadata! Unmount and run "
diff --git a/fs/ntfs/lcnalloc.h b/fs/ntfs/lcnalloc.h
index e4d7fb98d685..a6a8827882e7 100644
--- a/fs/ntfs/lcnalloc.h
+++ b/fs/ntfs/lcnalloc.h
@@ -2,7 +2,7 @@
2 * lcnalloc.h - Exports for NTFS kernel cluster (de)allocation. Part of the 2 * lcnalloc.h - Exports for NTFS kernel cluster (de)allocation. Part of the
3 * Linux-NTFS project. 3 * Linux-NTFS project.
4 * 4 *
5 * Copyright (c) 2004 Anton Altaparmakov 5 * Copyright (c) 2004-2005 Anton Altaparmakov
6 * 6 *
7 * This program/include file is free software; you can redistribute it and/or 7 * This program/include file is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License as published 8 * modify it under the terms of the GNU General Public License as published
@@ -28,6 +28,7 @@
28#include <linux/fs.h> 28#include <linux/fs.h>
29 29
30#include "types.h" 30#include "types.h"
31#include "inode.h"
31#include "runlist.h" 32#include "runlist.h"
32#include "volume.h" 33#include "volume.h"
33 34
@@ -42,18 +43,17 @@ extern runlist_element *ntfs_cluster_alloc(ntfs_volume *vol,
42 const VCN start_vcn, const s64 count, const LCN start_lcn, 43 const VCN start_vcn, const s64 count, const LCN start_lcn,
43 const NTFS_CLUSTER_ALLOCATION_ZONES zone); 44 const NTFS_CLUSTER_ALLOCATION_ZONES zone);
44 45
45extern s64 __ntfs_cluster_free(struct inode *vi, const VCN start_vcn, 46extern s64 __ntfs_cluster_free(ntfs_inode *ni, const VCN start_vcn,
46 s64 count, const BOOL write_locked, const BOOL is_rollback); 47 s64 count, const BOOL is_rollback);
47 48
48/** 49/**
49 * ntfs_cluster_free - free clusters on an ntfs volume 50 * ntfs_cluster_free - free clusters on an ntfs volume
50 * @vi: vfs inode whose runlist describes the clusters to free 51 * @ni: ntfs inode whose runlist describes the clusters to free
51 * @start_vcn: vcn in the runlist of @vi at which to start freeing clusters 52 * @start_vcn: vcn in the runlist of @ni at which to start freeing clusters
52 * @count: number of clusters to free or -1 for all clusters 53 * @count: number of clusters to free or -1 for all clusters
53 * @write_locked: true if the runlist is locked for writing
54 * 54 *
55 * Free @count clusters starting at the cluster @start_vcn in the runlist 55 * Free @count clusters starting at the cluster @start_vcn in the runlist
56 * described by the vfs inode @vi. 56 * described by the ntfs inode @ni.
57 * 57 *
58 * If @count is -1, all clusters from @start_vcn to the end of the runlist are 58 * If @count is -1, all clusters from @start_vcn to the end of the runlist are
59 * deallocated. Thus, to completely free all clusters in a runlist, use 59 * deallocated. Thus, to completely free all clusters in a runlist, use
@@ -65,19 +65,18 @@ extern s64 __ntfs_cluster_free(struct inode *vi, const VCN start_vcn,
65 * Return the number of deallocated clusters (not counting sparse ones) on 65 * Return the number of deallocated clusters (not counting sparse ones) on
66 * success and -errno on error. 66 * success and -errno on error.
67 * 67 *
68 * Locking: - The runlist described by @vi must be locked on entry and is 68 * Locking: - The runlist described by @ni must be locked for writing on entry
69 * locked on return. Note if the runlist is locked for reading the 69 * and is locked on return. Note the runlist may be modified when
70 * lock may be dropped and reacquired. Note the runlist may be 70 * needed runlist fragments need to be mapped.
71 * modified when needed runlist fragments need to be mapped.
72 * - The volume lcn bitmap must be unlocked on entry and is unlocked 71 * - The volume lcn bitmap must be unlocked on entry and is unlocked
73 * on return. 72 * on return.
74 * - This function takes the volume lcn bitmap lock for writing and 73 * - This function takes the volume lcn bitmap lock for writing and
75 * modifies the bitmap contents. 74 * modifies the bitmap contents.
76 */ 75 */
77static inline s64 ntfs_cluster_free(struct inode *vi, const VCN start_vcn, 76static inline s64 ntfs_cluster_free(ntfs_inode *ni, const VCN start_vcn,
78 s64 count, const BOOL write_locked) 77 s64 count)
79{ 78{
80 return __ntfs_cluster_free(vi, start_vcn, count, write_locked, FALSE); 79 return __ntfs_cluster_free(ni, start_vcn, count, FALSE);
81} 80}
82 81
83extern int ntfs_cluster_free_from_rl_nolock(ntfs_volume *vol, 82extern int ntfs_cluster_free_from_rl_nolock(ntfs_volume *vol,
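For reference, a minimal sketch of how a caller might use the reworked ntfs_cluster_free() under the locking rules documented above. This is illustration only, not part of the patch; it assumes the runlist lock referred to is the rw_semaphore embedded in ni->runlist, and the helper name is made up.

static s64 example_free_all_clusters(ntfs_inode *ni)
{
	s64 nr;

	/* The runlist must be locked for writing on entry, per the comment above. */
	down_write(&ni->runlist.lock);
	/* count == -1 frees every cluster from @start_vcn to the end of the runlist. */
	nr = ntfs_cluster_free(ni, 0, -1);
	/* The runlist lock is still held on return; the caller drops it. */
	up_write(&ni->runlist.lock);
	/* nr is the number of freed clusters (sparse ones not counted) or -errno. */
	return nr;
}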
diff --git a/fs/ntfs/logfile.c b/fs/ntfs/logfile.c
index 0173e95500d9..0fd70295cca6 100644
--- a/fs/ntfs/logfile.c
+++ b/fs/ntfs/logfile.c
@@ -51,7 +51,8 @@ static BOOL ntfs_check_restart_page_header(struct inode *vi,
51 RESTART_PAGE_HEADER *rp, s64 pos) 51 RESTART_PAGE_HEADER *rp, s64 pos)
52{ 52{
53 u32 logfile_system_page_size, logfile_log_page_size; 53 u32 logfile_system_page_size, logfile_log_page_size;
54 u16 usa_count, usa_ofs, usa_end, ra_ofs; 54 u16 ra_ofs, usa_count, usa_ofs, usa_end = 0;
55 BOOL have_usa = TRUE;
55 56
56 ntfs_debug("Entering."); 57 ntfs_debug("Entering.");
57 /* 58 /*
@@ -86,6 +87,14 @@ static BOOL ntfs_check_restart_page_header(struct inode *vi,
86 (int)sle16_to_cpu(rp->minor_ver)); 87 (int)sle16_to_cpu(rp->minor_ver));
87 return FALSE; 88 return FALSE;
88 } 89 }
90 /*
91 * If chkdsk has been run the restart page may not be protected by an
92 * update sequence array.
93 */
94 if (ntfs_is_chkd_record(rp->magic) && !le16_to_cpu(rp->usa_count)) {
95 have_usa = FALSE;
96 goto skip_usa_checks;
97 }
89 /* Verify the size of the update sequence array. */ 98 /* Verify the size of the update sequence array. */
90 usa_count = 1 + (logfile_system_page_size >> NTFS_BLOCK_SIZE_BITS); 99 usa_count = 1 + (logfile_system_page_size >> NTFS_BLOCK_SIZE_BITS);
91 if (usa_count != le16_to_cpu(rp->usa_count)) { 100 if (usa_count != le16_to_cpu(rp->usa_count)) {
@@ -102,6 +111,7 @@ static BOOL ntfs_check_restart_page_header(struct inode *vi,
102 "inconsistent update sequence array offset."); 111 "inconsistent update sequence array offset.");
103 return FALSE; 112 return FALSE;
104 } 113 }
114skip_usa_checks:
105 /* 115 /*
106 * Verify the position of the restart area. It must be: 116 * Verify the position of the restart area. It must be:
107 * - aligned to 8-byte boundary, 117 * - aligned to 8-byte boundary,
@@ -109,7 +119,8 @@ static BOOL ntfs_check_restart_page_header(struct inode *vi,
109 * - within the system page size. 119 * - within the system page size.
110 */ 120 */
111 ra_ofs = le16_to_cpu(rp->restart_area_offset); 121 ra_ofs = le16_to_cpu(rp->restart_area_offset);
112 if (ra_ofs & 7 || ra_ofs < usa_end || 122 if (ra_ofs & 7 || (have_usa ? ra_ofs < usa_end :
123 ra_ofs < sizeof(RESTART_PAGE_HEADER)) ||
113 ra_ofs > logfile_system_page_size) { 124 ra_ofs > logfile_system_page_size) {
114 ntfs_error(vi->i_sb, "$LogFile restart page specifies " 125 ntfs_error(vi->i_sb, "$LogFile restart page specifies "
115 "inconsistent restart area offset."); 126 "inconsistent restart area offset.");
@@ -402,8 +413,12 @@ static int ntfs_check_and_load_restart_page(struct inode *vi,
402 idx++; 413 idx++;
403 } while (to_read > 0); 414 } while (to_read > 0);
404 } 415 }
405 /* Perform the multi sector transfer deprotection on the buffer. */ 416 /*
406 if (post_read_mst_fixup((NTFS_RECORD*)trp, 417 * Perform the multi sector transfer deprotection on the buffer if the
418 * restart page is protected.
419 */
420 if ((!ntfs_is_chkd_record(trp->magic) || le16_to_cpu(trp->usa_count))
421 && post_read_mst_fixup((NTFS_RECORD*)trp,
407 le32_to_cpu(rp->system_page_size))) { 422 le32_to_cpu(rp->system_page_size))) {
408 /* 423 /*
409 * A multi sector transfer error was detected. We only need to 424 * A multi sector transfer error was detected. We only need to
@@ -615,11 +630,16 @@ is_empty:
615 * Otherwise just throw it away. 630 * Otherwise just throw it away.
616 */ 631 */
617 if (rstr2_lsn > rstr1_lsn) { 632 if (rstr2_lsn > rstr1_lsn) {
633 ntfs_debug("Using second restart page as it is more "
634 "recent.");
618 ntfs_free(rstr1_ph); 635 ntfs_free(rstr1_ph);
619 rstr1_ph = rstr2_ph; 636 rstr1_ph = rstr2_ph;
620 /* rstr1_lsn = rstr2_lsn; */ 637 /* rstr1_lsn = rstr2_lsn; */
621 } else 638 } else {
639 ntfs_debug("Using first restart page as it is more "
640 "recent.");
622 ntfs_free(rstr2_ph); 641 ntfs_free(rstr2_ph);
642 }
623 rstr2_ph = NULL; 643 rstr2_ph = NULL;
624 } 644 }
625 /* All consistency checks passed. */ 645 /* All consistency checks passed. */
diff --git a/fs/ntfs/logfile.h b/fs/ntfs/logfile.h
index 42388f95ea6d..a51f3dd0e9eb 100644
--- a/fs/ntfs/logfile.h
+++ b/fs/ntfs/logfile.h
@@ -113,7 +113,7 @@ typedef struct {
113 */ 113 */
114enum { 114enum {
115 RESTART_VOLUME_IS_CLEAN = const_cpu_to_le16(0x0002), 115 RESTART_VOLUME_IS_CLEAN = const_cpu_to_le16(0x0002),
116 RESTART_SPACE_FILLER = 0xffff, /* gcc: Force enum bit width to 16. */ 116 RESTART_SPACE_FILLER = const_cpu_to_le16(0xffff), /* gcc: Force enum bit width to 16. */
117} __attribute__ ((__packed__)); 117} __attribute__ ((__packed__));
118 118
119typedef le16 RESTART_AREA_FLAGS; 119typedef le16 RESTART_AREA_FLAGS;
diff --git a/fs/ntfs/mft.c b/fs/ntfs/mft.c
index 2c32b84385a8..247586d1d5dc 100644
--- a/fs/ntfs/mft.c
+++ b/fs/ntfs/mft.c
@@ -1953,7 +1953,7 @@ restore_undo_alloc:
1953 a = ctx->attr; 1953 a = ctx->attr;
1954 a->data.non_resident.highest_vcn = cpu_to_sle64(old_last_vcn - 1); 1954 a->data.non_resident.highest_vcn = cpu_to_sle64(old_last_vcn - 1);
1955undo_alloc: 1955undo_alloc:
1956 if (ntfs_cluster_free(vol->mft_ino, old_last_vcn, -1, TRUE) < 0) { 1956 if (ntfs_cluster_free(mft_ni, old_last_vcn, -1) < 0) {
1957 ntfs_error(vol->sb, "Failed to free clusters from mft data " 1957 ntfs_error(vol->sb, "Failed to free clusters from mft data "
1958 "attribute.%s", es); 1958 "attribute.%s", es);
1959 NVolSetErrors(vol); 1959 NVolSetErrors(vol);
diff --git a/fs/proc/base.c b/fs/proc/base.c
index fb34f88a4a74..3b33f94020db 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -343,7 +343,8 @@ static int proc_root_link(struct inode *inode, struct dentry **dentry, struct vf
343 343
344/* Same as proc_root_link, but this additionally tries to get fs from other 344/* Same as proc_root_link, but this additionally tries to get fs from other
345 * threads in the group */ 345 * threads in the group */
346static int proc_task_root_link(struct inode *inode, struct dentry **dentry, struct vfsmount **mnt) 346static int proc_task_root_link(struct inode *inode, struct dentry **dentry,
347 struct vfsmount **mnt)
347{ 348{
348 struct fs_struct *fs; 349 struct fs_struct *fs;
349 int result = -ENOENT; 350 int result = -ENOENT;
@@ -357,9 +358,10 @@ static int proc_task_root_link(struct inode *inode, struct dentry **dentry, stru
357 } else { 358 } else {
358 /* Try to get fs from other threads */ 359 /* Try to get fs from other threads */
359 task_unlock(leader); 360 task_unlock(leader);
360 struct task_struct *task = leader;
361 read_lock(&tasklist_lock); 361 read_lock(&tasklist_lock);
362 if (pid_alive(task)) { 362 if (pid_alive(leader)) {
363 struct task_struct *task = leader;
364
363 while ((task = next_thread(task)) != leader) { 365 while ((task = next_thread(task)) != leader) {
364 task_lock(task); 366 task_lock(task);
365 fs = task->fs; 367 fs = task->fs;
diff --git a/fs/read_write.c b/fs/read_write.c
index b60324aaa2b6..a091ee4f430d 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -499,6 +499,9 @@ static ssize_t do_readv_writev(int type, struct file *file,
499 ret = rw_verify_area(type, file, pos, tot_len); 499 ret = rw_verify_area(type, file, pos, tot_len);
500 if (ret) 500 if (ret)
501 goto out; 501 goto out;
502 ret = security_file_permission(file, type == READ ? MAY_READ : MAY_WRITE);
503 if (ret)
504 goto out;
502 505
503 fnv = NULL; 506 fnv = NULL;
504 if (type == READ) { 507 if (type == READ) {
diff --git a/include/asm-alpha/compiler.h b/include/asm-alpha/compiler.h
index 399c33b7be51..0a4a8b40dfcd 100644
--- a/include/asm-alpha/compiler.h
+++ b/include/asm-alpha/compiler.h
@@ -98,6 +98,9 @@
98#undef inline 98#undef inline
99#undef __inline__ 99#undef __inline__
100#undef __inline 100#undef __inline
101 101#if __GNUC__ == 3 && __GNUC_MINOR__ >= 1 || __GNUC__ > 3
102#undef __always_inline
103#define __always_inline inline __attribute__((always_inline))
104#endif
102 105
103#endif /* __ALPHA_COMPILER_H */ 106#endif /* __ALPHA_COMPILER_H */
diff --git a/include/asm-arm/arch-rpc/hardware.h b/include/asm-arm/arch-rpc/hardware.h
index be9754a05c19..9d7f87375aa7 100644
--- a/include/asm-arm/arch-rpc/hardware.h
+++ b/include/asm-arm/arch-rpc/hardware.h
@@ -15,7 +15,7 @@
15#include <asm/arch/memory.h> 15#include <asm/arch/memory.h>
16 16
17#ifndef __ASSEMBLY__ 17#ifndef __ASSEMBLY__
18#define IOMEM(x) ((void __iomem *)(x)) 18#define IOMEM(x) ((void __iomem *)(unsigned long)(x))
19#else 19#else
20#define IOMEM(x) x 20#define IOMEM(x) x
21#endif /* __ASSEMBLY__ */ 21#endif /* __ASSEMBLY__ */
@@ -52,7 +52,7 @@
52/* 52/*
53 * IO Addresses 53 * IO Addresses
54 */ 54 */
55#define VIDC_BASE (void __iomem *)0xe0400000 55#define VIDC_BASE IOMEM(0xe0400000)
56#define EXPMASK_BASE 0xe0360000 56#define EXPMASK_BASE 0xe0360000
57#define IOMD_BASE IOMEM(0xe0200000) 57#define IOMD_BASE IOMEM(0xe0200000)
58#define IOC_BASE IOMEM(0xe0200000) 58#define IOC_BASE IOMEM(0xe0200000)
diff --git a/include/asm-arm/arch-s3c2410/anubis-map.h b/include/asm-arm/arch-s3c2410/anubis-map.h
index 97741d6e506a..d529ffda8599 100644
--- a/include/asm-arm/arch-s3c2410/anubis-map.h
+++ b/include/asm-arm/arch-s3c2410/anubis-map.h
@@ -20,22 +20,22 @@
20 20
21/* start peripherals off after the S3C2410 */ 21/* start peripherals off after the S3C2410 */
22 22
23#define ANUBIS_IOADDR(x) (S3C2410_ADDR((x) + 0x02000000)) 23#define ANUBIS_IOADDR(x) (S3C2410_ADDR((x) + 0x01800000))
24 24
25#define ANUBIS_PA_CPLD (S3C2410_CS1 | (1<<26)) 25#define ANUBIS_PA_CPLD (S3C2410_CS1 | (1<<26))
26 26
27/* we put the CPLD registers next, to get them out of the way */ 27/* we put the CPLD registers next, to get them out of the way */
28 28
29#define ANUBIS_VA_CTRL1 ANUBIS_IOADDR(0x00000000) /* 0x01300000 */ 29#define ANUBIS_VA_CTRL1 ANUBIS_IOADDR(0x00000000) /* 0x01800000 */
30#define ANUBIS_PA_CTRL1 (ANUBIS_PA_CPLD) 30#define ANUBIS_PA_CTRL1 (ANUBIS_PA_CPLD)
31 31
32#define ANUBIS_VA_CTRL2 ANUBIS_IOADDR(0x00100000) /* 0x01400000 */ 32#define ANUBIS_VA_CTRL2 ANUBIS_IOADDR(0x00100000) /* 0x01900000 */
33#define ANUBIS_PA_CTRL2 (ANUBIS_PA_CPLD) 33#define ANUBIS_PA_CTRL2 (ANUBIS_PA_CPLD)
34 34
35#define ANUBIS_VA_CTRL3 ANUBIS_IOADDR(0x00200000) /* 0x01500000 */ 35#define ANUBIS_VA_CTRL3 ANUBIS_IOADDR(0x00200000) /* 0x01A00000 */
36#define ANUBIS_PA_CTRL3 (ANUBIS_PA_CPLD) 36#define ANUBIS_PA_CTRL3 (ANUBIS_PA_CPLD)
37 37
38#define ANUBIS_VA_CTRL4 ANUBIS_IOADDR(0x00300000) /* 0x01600000 */ 38#define ANUBIS_VA_CTRL4 ANUBIS_IOADDR(0x00300000) /* 0x01B00000 */
39#define ANUBIS_PA_CTRL4 (ANUBIS_PA_CPLD) 39#define ANUBIS_PA_CTRL4 (ANUBIS_PA_CPLD)
40 40
41#define ANUBIS_IDEPRI ANUBIS_IOADDR(0x01000000) 41#define ANUBIS_IDEPRI ANUBIS_IOADDR(0x01000000)
diff --git a/include/asm-arm/arch-versatile/io.h b/include/asm-arm/arch-versatile/io.h
index 9f895bf61494..47e904cf25c7 100644
--- a/include/asm-arm/arch-versatile/io.h
+++ b/include/asm-arm/arch-versatile/io.h
@@ -22,7 +22,11 @@
22 22
23#define IO_SPACE_LIMIT 0xffffffff 23#define IO_SPACE_LIMIT 0xffffffff
24 24
25#define __io(a) ((void __iomem *)(a)) 25static inline void __iomem *__io(unsigned long addr)
26{
27 return (void __iomem *)addr;
28}
29#define __io(a) __io(a)
26#define __mem_pci(a) (a) 30#define __mem_pci(a) (a)
27#define __mem_isa(a) (a) 31#define __mem_isa(a) (a)
28 32
diff --git a/include/asm-arm/elf.h b/include/asm-arm/elf.h
index a1696ba238d3..7da97a937548 100644
--- a/include/asm-arm/elf.h
+++ b/include/asm-arm/elf.h
@@ -124,6 +124,8 @@ do { \
124 if (((ex).e_flags & EF_ARM_EABI_MASK) || \ 124 if (((ex).e_flags & EF_ARM_EABI_MASK) || \
125 ((ex).e_flags & EF_ARM_SOFT_FLOAT)) \ 125 ((ex).e_flags & EF_ARM_SOFT_FLOAT)) \
126 set_thread_flag(TIF_USING_IWMMXT); \ 126 set_thread_flag(TIF_USING_IWMMXT); \
127 else \
128 clear_thread_flag(TIF_USING_IWMMXT); \
127} while (0) 129} while (0)
128 130
129#endif 131#endif
diff --git a/include/asm-arm/io.h b/include/asm-arm/io.h
index cfa71a0dffb6..5c4ae8f5dbb0 100644
--- a/include/asm-arm/io.h
+++ b/include/asm-arm/io.h
@@ -136,9 +136,9 @@ extern void __readwrite_bug(const char *fn);
136/* 136/*
137 * String version of IO memory access ops: 137 * String version of IO memory access ops:
138 */ 138 */
139extern void _memcpy_fromio(void *, void __iomem *, size_t); 139extern void _memcpy_fromio(void *, const volatile void __iomem *, size_t);
140extern void _memcpy_toio(void __iomem *, const void *, size_t); 140extern void _memcpy_toio(volatile void __iomem *, const void *, size_t);
141extern void _memset_io(void __iomem *, int, size_t); 141extern void _memset_io(volatile void __iomem *, int, size_t);
142 142
143#define mmiowb() 143#define mmiowb()
144 144
diff --git a/include/asm-arm/signal.h b/include/asm-arm/signal.h
index 760f6e65af05..ced69161917b 100644
--- a/include/asm-arm/signal.h
+++ b/include/asm-arm/signal.h
@@ -115,7 +115,6 @@ typedef unsigned long sigset_t;
115 115
116#ifdef __KERNEL__ 116#ifdef __KERNEL__
117#define SA_TIMER 0x40000000 117#define SA_TIMER 0x40000000
118#define SA_IRQNOMASK 0x08000000
119#endif 118#endif
120 119
121#include <asm-generic/signal.h> 120#include <asm-generic/signal.h>
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index f86c1e549466..ff28c8b31f58 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -158,6 +158,19 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addres
158#define lazy_mmu_prot_update(pte) do { } while (0) 158#define lazy_mmu_prot_update(pte) do { } while (0)
159#endif 159#endif
160 160
161#ifndef __HAVE_ARCH_MULTIPLE_ZERO_PAGE
162#define move_pte(pte, prot, old_addr, new_addr) (pte)
163#else
164#define move_pte(pte, prot, old_addr, new_addr) \
165({ \
166 pte_t newpte = (pte); \
167 if (pte_present(pte) && pfn_valid(pte_pfn(pte)) && \
168 pte_page(pte) == ZERO_PAGE(old_addr)) \
169 newpte = mk_pte(ZERO_PAGE(new_addr), (prot)); \
170 newpte; \
171})
172#endif
173
161/* 174/*
162 * When walking page tables, get the address of the next boundary, 175 * When walking page tables, get the address of the next boundary,
163 * or the end address of the range if that comes earlier. Although no 176 * or the end address of the range if that comes earlier. Although no
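As a hedged illustration of where the new move_pte() hook is meant to slot in (the caller is not shown in this patch): a page-table mover along the lines of mremap's move_ptes() would pass each pte through move_pte() so that a mapping of a virtual-address-dependent zero page gets rewritten for its destination address. The function and parameter names below are assumptions made for the example.

static void example_move_one_pte(struct vm_area_struct *new_vma,
		pte_t *new_ptep, pte_t pte,
		unsigned long old_addr, unsigned long new_addr)
{
	/* On __HAVE_ARCH_MULTIPLE_ZERO_PAGE architectures this remaps a
	 * zero-page pte to the zero page that belongs to new_addr; on all
	 * other architectures it is a no-op that returns pte unchanged. */
	pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
	set_pte_at(new_vma->vm_mm, new_addr, new_ptep, pte);
}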
diff --git a/include/asm-i386/hw_irq.h b/include/asm-i386/hw_irq.h
index 4ac84cc6f01a..622815bf3243 100644
--- a/include/asm-i386/hw_irq.h
+++ b/include/asm-i386/hw_irq.h
@@ -18,6 +18,8 @@
18#include <asm/irq.h> 18#include <asm/irq.h>
19#include <asm/sections.h> 19#include <asm/sections.h>
20 20
21struct hw_interrupt_type;
22
21/* 23/*
22 * Various low-level irq details needed by irq.c, process.c, 24 * Various low-level irq details needed by irq.c, process.c,
23 * time.c, io_apic.c and smp.c 25 * time.c, io_apic.c and smp.c
diff --git a/include/asm-ia64/mca.h b/include/asm-ia64/mca.h
index 97a28b8b2ddd..c7d9c9ed38ba 100644
--- a/include/asm-ia64/mca.h
+++ b/include/asm-ia64/mca.h
@@ -80,7 +80,12 @@ struct ia64_sal_os_state {
80 u64 sal_ra; /* Return address in SAL, physical */ 80 u64 sal_ra; /* Return address in SAL, physical */
81 u64 sal_gp; /* GP of the SAL - physical */ 81 u64 sal_gp; /* GP of the SAL - physical */
82 pal_min_state_area_t *pal_min_state; /* from R17. physical in asm, virtual in C */ 82 pal_min_state_area_t *pal_min_state; /* from R17. physical in asm, virtual in C */
83 /* Previous values of IA64_KR(CURRENT) and IA64_KR(CURRENT_STACK).
84 * Note: if the MCA/INIT recovery code wants to resume to a new context
85 * then it must change these values to reflect the new kernel stack.
86 */
83 u64 prev_IA64_KR_CURRENT; /* previous value of IA64_KR(CURRENT) */ 87 u64 prev_IA64_KR_CURRENT; /* previous value of IA64_KR(CURRENT) */
88 u64 prev_IA64_KR_CURRENT_STACK;
84 struct task_struct *prev_task; /* previous task, NULL if it is not useful */ 89 struct task_struct *prev_task; /* previous task, NULL if it is not useful */
85 /* Some interrupt registers are not saved in minstate, pt_regs or 90 /* Some interrupt registers are not saved in minstate, pt_regs or
86 * switch_stack. Because MCA/INIT can occur when interrupts are 91 * switch_stack. Because MCA/INIT can occur when interrupts are
diff --git a/include/asm-ia64/uaccess.h b/include/asm-ia64/uaccess.h
index 3a7829bb5954..9adb51211c22 100644
--- a/include/asm-ia64/uaccess.h
+++ b/include/asm-ia64/uaccess.h
@@ -187,8 +187,8 @@ extern void __get_user_unknown (void);
187({ \ 187({ \
188 const __typeof__(*(ptr)) __user *__gu_ptr = (ptr); \ 188 const __typeof__(*(ptr)) __user *__gu_ptr = (ptr); \
189 __typeof__ (size) __gu_size = (size); \ 189 __typeof__ (size) __gu_size = (size); \
190 long __gu_err = -EFAULT, __gu_val = 0; \ 190 long __gu_err = -EFAULT; \
191 \ 191 unsigned long __gu_val = 0; \
192 if (!check || __access_ok(__gu_ptr, size, segment)) \ 192 if (!check || __access_ok(__gu_ptr, size, segment)) \
193 switch (__gu_size) { \ 193 switch (__gu_size) { \
194 case 1: __get_user_size(__gu_val, __gu_ptr, 1, __gu_err); break; \ 194 case 1: __get_user_size(__gu_val, __gu_ptr, 1, __gu_err); break; \
@@ -240,13 +240,13 @@ extern unsigned long __must_check __copy_user (void __user *to, const void __use
240static inline unsigned long 240static inline unsigned long
241__copy_to_user (void __user *to, const void *from, unsigned long count) 241__copy_to_user (void __user *to, const void *from, unsigned long count)
242{ 242{
243 return __copy_user(to, (void __user *) from, count); 243 return __copy_user(to, (__force void __user *) from, count);
244} 244}
245 245
246static inline unsigned long 246static inline unsigned long
247__copy_from_user (void *to, const void __user *from, unsigned long count) 247__copy_from_user (void *to, const void __user *from, unsigned long count)
248{ 248{
249 return __copy_user((void __user *) to, from, count); 249 return __copy_user((__force void __user *) to, from, count);
250} 250}
251 251
252#define __copy_to_user_inatomic __copy_to_user 252#define __copy_to_user_inatomic __copy_to_user
@@ -258,7 +258,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
258 long __cu_len = (n); \ 258 long __cu_len = (n); \
259 \ 259 \
260 if (__access_ok(__cu_to, __cu_len, get_fs())) \ 260 if (__access_ok(__cu_to, __cu_len, get_fs())) \
261 __cu_len = __copy_user(__cu_to, (void __user *) __cu_from, __cu_len); \ 261 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
262 __cu_len; \ 262 __cu_len; \
263}) 263})
264 264
@@ -270,7 +270,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
270 \ 270 \
271 __chk_user_ptr(__cu_from); \ 271 __chk_user_ptr(__cu_from); \
272 if (__access_ok(__cu_from, __cu_len, get_fs())) \ 272 if (__access_ok(__cu_from, __cu_len, get_fs())) \
273 __cu_len = __copy_user((void __user *) __cu_to, __cu_from, __cu_len); \ 273 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
274 __cu_len; \ 274 __cu_len; \
275}) 275})
276 276
diff --git a/include/asm-m32r/io.h b/include/asm-m32r/io.h
index 8e9e481e6996..70ad1c949c2b 100644
--- a/include/asm-m32r/io.h
+++ b/include/asm-m32r/io.h
@@ -60,7 +60,7 @@ __ioremap(unsigned long offset, unsigned long size, unsigned long flags);
60 * address. 60 * address.
61 */ 61 */
62 62
63static inline void * ioremap(unsigned long offset, unsigned long size) 63static inline void __iomem *ioremap(unsigned long offset, unsigned long size)
64{ 64{
65 return __ioremap(offset, size, 0); 65 return __ioremap(offset, size, 0);
66} 66}
diff --git a/include/asm-m32r/uaccess.h b/include/asm-m32r/uaccess.h
index 93d863c455a1..0da7c47d2f01 100644
--- a/include/asm-m32r/uaccess.h
+++ b/include/asm-m32r/uaccess.h
@@ -208,7 +208,8 @@ extern void __get_user_4(void);
208 * On error, the variable @x is set to zero. 208 * On error, the variable @x is set to zero.
209 */ 209 */
210#define get_user(x,ptr) \ 210#define get_user(x,ptr) \
211({ int __ret_gu,__val_gu; \ 211({ int __ret_gu; \
212 unsigned long __val_gu; \
212 __chk_user_ptr(ptr); \ 213 __chk_user_ptr(ptr); \
213 switch(sizeof (*(ptr))) { \ 214 switch(sizeof (*(ptr))) { \
214 case 1: __get_user_x(1,__ret_gu,__val_gu,ptr); break; \ 215 case 1: __get_user_x(1,__ret_gu,__val_gu,ptr); break; \
@@ -403,7 +404,8 @@ struct __large_struct { unsigned long buf[100]; };
403 404
404#define __get_user_nocheck(x,ptr,size) \ 405#define __get_user_nocheck(x,ptr,size) \
405({ \ 406({ \
406 long __gu_err, __gu_val; \ 407 long __gu_err; \
408 unsigned long __gu_val; \
407 __get_user_size(__gu_val,(ptr),(size),__gu_err); \ 409 __get_user_size(__gu_val,(ptr),(size),__gu_err); \
408 (x) = (__typeof__(*(ptr)))__gu_val; \ 410 (x) = (__typeof__(*(ptr)))__gu_val; \
409 __gu_err; \ 411 __gu_err; \
@@ -594,8 +596,8 @@ static inline unsigned long __generic_copy_to_user_nocheck(void __user *to,
594 return n; 596 return n;
595} 597}
596 598
597unsigned long __generic_copy_to_user(void *, const void *, unsigned long); 599unsigned long __generic_copy_to_user(void __user *, const void *, unsigned long);
598unsigned long __generic_copy_from_user(void *, const void *, unsigned long); 600unsigned long __generic_copy_from_user(void *, const void __user *, unsigned long);
599 601
600/** 602/**
601 * __copy_to_user: - Copy a block of data into user space, with less checking. 603 * __copy_to_user: - Copy a block of data into user space, with less checking.
diff --git a/include/asm-mips/pgtable.h b/include/asm-mips/pgtable.h
index cbd1672c94cb..eaf5d9b3a0e1 100644
--- a/include/asm-mips/pgtable.h
+++ b/include/asm-mips/pgtable.h
@@ -68,6 +68,8 @@ extern unsigned long zero_page_mask;
68#define ZERO_PAGE(vaddr) \ 68#define ZERO_PAGE(vaddr) \
69 (virt_to_page(empty_zero_page + (((unsigned long)(vaddr)) & zero_page_mask))) 69 (virt_to_page(empty_zero_page + (((unsigned long)(vaddr)) & zero_page_mask)))
70 70
71#define __HAVE_ARCH_MULTIPLE_ZERO_PAGE
72
71extern void paging_init(void); 73extern void paging_init(void);
72 74
73/* 75/*
diff --git a/include/asm-ppc/io.h b/include/asm-ppc/io.h
index 7eb7cf6360bd..94d83998a759 100644
--- a/include/asm-ppc/io.h
+++ b/include/asm-ppc/io.h
@@ -56,7 +56,7 @@ extern unsigned long pci_dram_offset;
56 * is actually performed (i.e. the data has come back) before we start 56 * is actually performed (i.e. the data has come back) before we start
57 * executing any following instructions. 57 * executing any following instructions.
58 */ 58 */
59extern inline int in_8(volatile unsigned char __iomem *addr) 59extern inline int in_8(const volatile unsigned char __iomem *addr)
60{ 60{
61 int ret; 61 int ret;
62 62
@@ -72,7 +72,7 @@ extern inline void out_8(volatile unsigned char __iomem *addr, int val)
72 __asm__ __volatile__("stb%U0%X0 %1,%0; eieio" : "=m" (*addr) : "r" (val)); 72 __asm__ __volatile__("stb%U0%X0 %1,%0; eieio" : "=m" (*addr) : "r" (val));
73} 73}
74 74
75extern inline int in_le16(volatile unsigned short __iomem *addr) 75extern inline int in_le16(const volatile unsigned short __iomem *addr)
76{ 76{
77 int ret; 77 int ret;
78 78
@@ -83,7 +83,7 @@ extern inline int in_le16(volatile unsigned short __iomem *addr)
83 return ret; 83 return ret;
84} 84}
85 85
86extern inline int in_be16(volatile unsigned short __iomem *addr) 86extern inline int in_be16(const volatile unsigned short __iomem *addr)
87{ 87{
88 int ret; 88 int ret;
89 89
@@ -104,7 +104,7 @@ extern inline void out_be16(volatile unsigned short __iomem *addr, int val)
104 __asm__ __volatile__("sth%U0%X0 %1,%0; eieio" : "=m" (*addr) : "r" (val)); 104 __asm__ __volatile__("sth%U0%X0 %1,%0; eieio" : "=m" (*addr) : "r" (val));
105} 105}
106 106
107extern inline unsigned in_le32(volatile unsigned __iomem *addr) 107extern inline unsigned in_le32(const volatile unsigned __iomem *addr)
108{ 108{
109 unsigned ret; 109 unsigned ret;
110 110
@@ -115,7 +115,7 @@ extern inline unsigned in_le32(volatile unsigned __iomem *addr)
115 return ret; 115 return ret;
116} 116}
117 117
118extern inline unsigned in_be32(volatile unsigned __iomem *addr) 118extern inline unsigned in_be32(const volatile unsigned __iomem *addr)
119{ 119{
120 unsigned ret; 120 unsigned ret;
121 121
@@ -139,7 +139,7 @@ extern inline void out_be32(volatile unsigned __iomem *addr, int val)
139#define readb(addr) in_8((volatile u8 *)(addr)) 139#define readb(addr) in_8((volatile u8 *)(addr))
140#define writeb(b,addr) out_8((volatile u8 *)(addr), (b)) 140#define writeb(b,addr) out_8((volatile u8 *)(addr), (b))
141#else 141#else
142static inline __u8 readb(volatile void __iomem *addr) 142static inline __u8 readb(const volatile void __iomem *addr)
143{ 143{
144 return in_8(addr); 144 return in_8(addr);
145} 145}
@@ -150,11 +150,11 @@ static inline void writeb(__u8 b, volatile void __iomem *addr)
150#endif 150#endif
151 151
152#if defined(CONFIG_APUS) 152#if defined(CONFIG_APUS)
153static inline __u16 readw(volatile void __iomem *addr) 153static inline __u16 readw(const volatile void __iomem *addr)
154{ 154{
155 return *(__force volatile __u16 *)(addr); 155 return *(__force volatile __u16 *)(addr);
156} 156}
157static inline __u32 readl(volatile void __iomem *addr) 157static inline __u32 readl(const volatile void __iomem *addr)
158{ 158{
159 return *(__force volatile __u32 *)(addr); 159 return *(__force volatile __u32 *)(addr);
160} 160}
@@ -173,11 +173,11 @@ static inline void writel(__u32 b, volatile void __iomem *addr)
173#define writew(b,addr) out_le16((volatile u16 *)(addr),(b)) 173#define writew(b,addr) out_le16((volatile u16 *)(addr),(b))
174#define writel(b,addr) out_le32((volatile u32 *)(addr),(b)) 174#define writel(b,addr) out_le32((volatile u32 *)(addr),(b))
175#else 175#else
176static inline __u16 readw(volatile void __iomem *addr) 176static inline __u16 readw(const volatile void __iomem *addr)
177{ 177{
178 return in_le16(addr); 178 return in_le16(addr);
179} 179}
180static inline __u32 readl(volatile void __iomem *addr) 180static inline __u32 readl(const volatile void __iomem *addr)
181{ 181{
182 return in_le32(addr); 182 return in_le32(addr);
183} 183}
diff --git a/include/asm-ppc/macio.h b/include/asm-ppc/macio.h
index a481b772d154..b553dd4b139e 100644
--- a/include/asm-ppc/macio.h
+++ b/include/asm-ppc/macio.h
@@ -1,7 +1,6 @@
1#ifndef __MACIO_ASIC_H__ 1#ifndef __MACIO_ASIC_H__
2#define __MACIO_ASIC_H__ 2#define __MACIO_ASIC_H__
3 3
4#include <linux/mod_devicetable.h>
5#include <asm/of_device.h> 4#include <asm/of_device.h>
6 5
7extern struct bus_type macio_bus_type; 6extern struct bus_type macio_bus_type;
diff --git a/include/asm-ppc/mv64x60.h b/include/asm-ppc/mv64x60.h
index 75c2ffa26b26..ee2f9188cc64 100644
--- a/include/asm-ppc/mv64x60.h
+++ b/include/asm-ppc/mv64x60.h
@@ -233,7 +233,7 @@ struct mv64x60_chip_info {
233struct mv64x60_handle { 233struct mv64x60_handle {
234 u32 type; /* type of bridge */ 234 u32 type; /* type of bridge */
235 u32 rev; /* revision of bridge */ 235 u32 rev; /* revision of bridge */
236 void *v_base; /* virtual base addr of bridge regs */ 236 void __iomem *v_base;/* virtual base addr of bridge regs */
237 phys_addr_t p_base; /* physical base addr of bridge regs */ 237 phys_addr_t p_base; /* physical base addr of bridge regs */
238 238
239 u32 pci_mode_a; /* pci 0 mode: conventional pci, pci-x*/ 239 u32 pci_mode_a; /* pci 0 mode: conventional pci, pci-x*/
@@ -303,7 +303,7 @@ void mv64x60_alloc_hose(struct mv64x60_handle *bh, u32 cfg_addr,
303 u32 cfg_data, struct pci_controller **hose); 303 u32 cfg_data, struct pci_controller **hose);
304int mv64x60_get_type(struct mv64x60_handle *bh); 304int mv64x60_get_type(struct mv64x60_handle *bh);
305int mv64x60_setup_for_chip(struct mv64x60_handle *bh); 305int mv64x60_setup_for_chip(struct mv64x60_handle *bh);
306void *mv64x60_get_bridge_vbase(void); 306void __iomem *mv64x60_get_bridge_vbase(void);
307u32 mv64x60_get_bridge_type(void); 307u32 mv64x60_get_bridge_type(void);
308u32 mv64x60_get_bridge_rev(void); 308u32 mv64x60_get_bridge_rev(void);
309void mv64x60_get_mem_windows(struct mv64x60_handle *bh, 309void mv64x60_get_mem_windows(struct mv64x60_handle *bh,
diff --git a/include/asm-ppc/of_device.h b/include/asm-ppc/of_device.h
index 4b264cfd3998..575bce418f80 100644
--- a/include/asm-ppc/of_device.h
+++ b/include/asm-ppc/of_device.h
@@ -2,6 +2,7 @@
2#define __OF_DEVICE_H__ 2#define __OF_DEVICE_H__
3 3
4#include <linux/device.h> 4#include <linux/device.h>
5#include <linux/mod_devicetable.h>
5#include <asm/prom.h> 6#include <asm/prom.h>
6 7
7/* 8/*
@@ -55,7 +56,9 @@ extern int of_register_driver(struct of_platform_driver *drv);
55extern void of_unregister_driver(struct of_platform_driver *drv); 56extern void of_unregister_driver(struct of_platform_driver *drv);
56extern int of_device_register(struct of_device *ofdev); 57extern int of_device_register(struct of_device *ofdev);
57extern void of_device_unregister(struct of_device *ofdev); 58extern void of_device_unregister(struct of_device *ofdev);
58extern struct of_device *of_platform_device_create(struct device_node *np, const char *bus_id); 59extern struct of_device *of_platform_device_create(struct device_node *np,
60 const char *bus_id,
61 struct device *parent);
59extern void of_release_dev(struct device *dev); 62extern void of_release_dev(struct device *dev);
60 63
61#endif /* __OF_DEVICE_H__ */ 64#endif /* __OF_DEVICE_H__ */
diff --git a/include/asm-ppc64/smu.h b/include/asm-ppc64/smu.h
index 10b4397af9aa..dee8eefe47bc 100644
--- a/include/asm-ppc64/smu.h
+++ b/include/asm-ppc64/smu.h
@@ -1,22 +1,379 @@
1#ifndef _SMU_H
2#define _SMU_H
3
1/* 4/*
2 * Definitions for talking to the SMU chip in newer G5 PowerMacs 5 * Definitions for talking to the SMU chip in newer G5 PowerMacs
3 */ 6 */
4 7
5#include <linux/config.h> 8#include <linux/config.h>
9#include <linux/list.h>
10
11/*
12 * Known SMU commands
13 *
14 * Most of what is below comes from looking at the Open Firmware driver,
15 * though this is still incomplete and could use better documentation here
16 * or there...
17 */
18
19
20/*
21 * Partition info commands
22 *
23 * I do not know what those are for at this point
24 */
25#define SMU_CMD_PARTITION_COMMAND 0x3e
26
27
28/*
29 * Fan control
30 *
31 * This is a "mux" for fan control commands, first byte is the
32 * "sub" command.
33 */
34#define SMU_CMD_FAN_COMMAND 0x4a
35
36
37/*
38 * Battery access
39 *
40 * Same command number as the PMU, could it be same syntax ?
41 */
42#define SMU_CMD_BATTERY_COMMAND 0x6f
43#define SMU_CMD_GET_BATTERY_INFO 0x00
44
45/*
46 * Real time clock control
47 *
48 * This is a "mux", first data byte contains the "sub" command.
49 * The "RTC" part of the SMU controls the date, time, powerup
50 * timer, but also a PRAM
51 *
52 * Dates are in BCD format on 7 bytes:
53 * [sec] [min] [hour] [weekday] [month day] [month] [year]
54 * with month being 1 based and year minus 100
55 */
56#define SMU_CMD_RTC_COMMAND 0x8e
57#define SMU_CMD_RTC_SET_PWRUP_TIMER 0x00 /* i: 7 bytes date */
58#define SMU_CMD_RTC_GET_PWRUP_TIMER 0x01 /* o: 7 bytes date */
59#define SMU_CMD_RTC_STOP_PWRUP_TIMER 0x02
60#define SMU_CMD_RTC_SET_PRAM_BYTE_ACC 0x20 /* i: 1 byte (address?) */
61#define SMU_CMD_RTC_SET_PRAM_AUTOINC 0x21 /* i: 1 byte (data?) */
62#define SMU_CMD_RTC_SET_PRAM_LO_BYTES 0x22 /* i: 10 bytes */
63#define SMU_CMD_RTC_SET_PRAM_HI_BYTES 0x23 /* i: 10 bytes */
64#define SMU_CMD_RTC_GET_PRAM_BYTE 0x28 /* i: 1 bytes (address?) */
65#define SMU_CMD_RTC_GET_PRAM_LO_BYTES 0x29 /* o: 10 bytes */
66#define SMU_CMD_RTC_GET_PRAM_HI_BYTES 0x2a /* o: 10 bytes */
67#define SMU_CMD_RTC_SET_DATETIME 0x80 /* i: 7 bytes date */
68#define SMU_CMD_RTC_GET_DATETIME 0x81 /* o: 7 bytes date */
69
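Illustration only, not part of this header: packing a struct rtc_time into the 7-byte BCD date layout described above, e.g. for SMU_CMD_RTC_SET_DATETIME. The helper names and the open-coded BCD conversion are assumptions made for the example.

static inline u8 example_to_bcd(int v)
{
	return ((v / 10) << 4) | (v % 10);
}

static void example_pack_smu_date(const struct rtc_time *tm, u8 buf[7])
{
	buf[0] = example_to_bcd(tm->tm_sec);
	buf[1] = example_to_bcd(tm->tm_min);
	buf[2] = example_to_bcd(tm->tm_hour);
	buf[3] = example_to_bcd(tm->tm_wday);
	buf[4] = example_to_bcd(tm->tm_mday);
	buf[5] = example_to_bcd(tm->tm_mon + 1);	/* month is 1 based */
	buf[6] = example_to_bcd(tm->tm_year - 100);	/* year minus 100 */
}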
70 /*
71 * i2c commands
72 *
 73 * To issue an i2c command, the first step is to send a parameter block
 74 * to the SMU. This is a command of type 0x9a with 9 bytes of header
75 * eventually followed by data for a write:
76 *
77 * 0: bus number (from device-tree usually, SMU has lots of busses !)
78 * 1: transfer type/format (see below)
79 * 2: device address. For combined and combined4 type transfers, this
80 * is the "write" version of the address (bit 0x01 cleared)
81 * 3: subaddress length (0..3)
82 * 4: subaddress byte 0 (or only byte for subaddress length 1)
83 * 5: subaddress byte 1
84 * 6: subaddress byte 2
85 * 7: combined address (device address for combined mode data phase)
86 * 8: data length
87 *
88 * The transfer types are the same good old Apple ones it seems,
89 * that is:
90 * - 0x00: Simple transfer
91 * - 0x01: Subaddress transfer (addr write + data tx, no restart)
92 * - 0x02: Combined transfer (addr write + restart + data tx)
93 *
94 * This is then followed by actual data for a write.
95 *
96 * At this point, the OF driver seems to have a limitation on transfer
97 * sizes of 0xd bytes on reads and 0x5 bytes on writes. I do not know
 98 * whether this is just an OF limit due to some temporary buffer size
 99 * or if this is an SMU-imposed limit. This driver has the same limitation
 100 * for now as I use a 0x10 byte temporary buffer as well
101 *
102 * Once that is completed, a response is expected from the SMU. This is
103 * obtained via a command of type 0x9a with a length of 1 byte containing
104 * 0 as the data byte. OF also fills the rest of the data buffer with 0xff's
105 * though I can't tell yet if this is actually necessary. Once this command
 106 * is complete, all I can tell is what OF does. OF tests
107 * byte 0 of the reply:
108 * - on read, 0xfe or 0xfc : bus is busy, wait (see below) or nak ?
109 * - on read, 0x00 or 0x01 : reply is in buffer (after the byte 0)
110 * - on write, < 0 -> failure (immediate exit)
 111 * - else, OF just exits (without error, weird)
112 *
113 * So on read, there is this wait-for-busy thing when getting a 0xfc or
114 * 0xfe result. OF does a loop of up to 64 retries, waiting 20ms and
115 * doing the above again until either the retries expire or the result
116 * is no longer 0xfe or 0xfc
117 *
118 * The Darwin I2C driver is less subtle though. On any non-success status
119 * from the response command, it waits 5ms and tries again up to 20 times,
 120 * it doesn't differentiate between fatal errors and "busy" status.
121 *
122 * This driver provides an asynchronous paramblock based i2c command
123 * interface to be used either directly by low level code or by a higher
124 * level driver interfacing to the linux i2c layer. The current
125 * implementation of this relies on working timers & timer interrupts
126 * though, so be careful of calling context for now. This may be "fixed"
127 * in the future by adding a polling facility.
128 */
129#define SMU_CMD_I2C_COMMAND 0x9a
130 /* transfer types */
131#define SMU_I2C_TRANSFER_SIMPLE 0x00
132#define SMU_I2C_TRANSFER_STDSUB 0x01
133#define SMU_I2C_TRANSFER_COMBINED 0x02
134
135/*
136 * Power supply control
137 *
138 * The "sub" command is an ASCII string in the data, the
 139 * data length is that of the string.
140 *
141 * The VSLEW command can be used to get or set the voltage slewing.
 142 * - length 5 (only "VSLEW") : it returns "DONE" and 3 bytes of
143 * reply at data offset 6, 7 and 8.
 144 * - length 8 ("VSLEWxyz") has 3 additional bytes appended, and is
145 * used to set the voltage slewing point. The SMU replies with "DONE"
 146 * I have yet to figure out the exact meaning of those 3 bytes in
147 * both cases.
148 *
149 */
150#define SMU_CMD_POWER_COMMAND 0xaa
151#define SMU_CMD_POWER_RESTART "RESTART"
152#define SMU_CMD_POWER_SHUTDOWN "SHUTDOWN"
153#define SMU_CMD_POWER_VOLTAGE_SLEW "VSLEW"
154
155/* Misc commands
156 *
 157 * This command seems to be a grab bag of various things
158 */
159#define SMU_CMD_MISC_df_COMMAND 0xdf
160#define SMU_CMD_MISC_df_SET_DISPLAY_LIT 0x02 /* i: 1 byte */
161#define SMU_CMD_MISC_df_NMI_OPTION 0x04
162
163/*
164 * Version info commands
165 *
166 * I haven't quite tried to figure out how these work
167 */
168#define SMU_CMD_VERSION_COMMAND 0xea
169
170
171/*
172 * Misc commands
173 *
 174 * This command seems to be a grab bag of various things
175 */
176#define SMU_CMD_MISC_ee_COMMAND 0xee
177#define SMU_CMD_MISC_ee_GET_DATABLOCK_REC 0x02
178#define SMU_CMD_MISC_ee_LEDS_CTRL 0x04 /* i: 00 (00,01) [00] */
179#define SMU_CMD_MISC_ee_GET_DATA 0x05 /* i: 00 , o: ?? */
180
181
182
183/*
184 * - Kernel side interface -
185 */
186
187#ifdef __KERNEL__
188
189/*
190 * Asynchronous SMU commands
191 *
192 * Fill up this structure and submit it via smu_queue_command(),
193 * and get notified by the optional done() callback, or because
194 * status becomes != 1
195 */
196
197struct smu_cmd;
198
199struct smu_cmd
200{
201 /* public */
202 u8 cmd; /* command */
203 int data_len; /* data len */
204 int reply_len; /* reply len */
205 void *data_buf; /* data buffer */
206 void *reply_buf; /* reply buffer */
207 int status; /* command status */
208 void (*done)(struct smu_cmd *cmd, void *misc);
209 void *misc;
210
211 /* private */
212 struct list_head link;
213};
214
215/*
216 * Queues an SMU command, all fields have to be initialized
217 */
218extern int smu_queue_cmd(struct smu_cmd *cmd);
219
220/*
221 * Simple command wrapper. This structure embeds a small buffer
222 * to ease sending simple SMU commands from the stack
223 */
224struct smu_simple_cmd
225{
226 struct smu_cmd cmd;
227 u8 buffer[16];
228};
229
230/*
231 * Queues a simple command. All fields will be initialized by that
232 * function
233 */
234extern int smu_queue_simple(struct smu_simple_cmd *scmd, u8 command,
235 unsigned int data_len,
236 void (*done)(struct smu_cmd *cmd, void *misc),
237 void *misc,
238 ...);
239
240/*
241 * Completion helper. Pass it to smu_queue_simple or as 'done'
242 * member to smu_queue_cmd, it will call complete() on the struct
243 * completion passed in the "misc" argument
244 */
245extern void smu_done_complete(struct smu_cmd *cmd, void *misc);
6 246
7/* 247/*
8 * Basic routines for use by architecture. To be extended as 248 * Synchronous helpers. Will spin-wait for completion of a command
9 * we understand more of the chip 249 */
250extern void smu_spinwait_cmd(struct smu_cmd *cmd);
251
252static inline void smu_spinwait_simple(struct smu_simple_cmd *scmd)
253{
254 smu_spinwait_cmd(&scmd->cmd);
255}
256
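A hedged sketch of the simple-command helpers in use (not from the patch): synchronously fetch the RTC date/time. It assumes the variadic arguments to smu_queue_simple() are the data_len command data bytes and that the reply ends up in scmd.buffer; check the driver implementation before relying on either.

static int example_get_rtc_bcd(u8 date[7])
{
	struct smu_simple_cmd scmd;
	int rc;

	/* One data byte: the RTC "get date/time" sub command. */
	rc = smu_queue_simple(&scmd, SMU_CMD_RTC_COMMAND, 1, NULL, NULL,
			      SMU_CMD_RTC_GET_DATETIME);
	if (rc)
		return rc;
	smu_spinwait_simple(&scmd);		/* spin until the command completes */
	if (scmd.cmd.status)
		return scmd.cmd.status;
	memcpy(date, scmd.buffer, 7);		/* 7 BCD bytes, see the RTC comment above */
	return 0;
}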
257/*
258 * Poll routine to call if blocked with irqs off
259 */
260extern void smu_poll(void);
261
262
263/*
264 * Init routine, presence check....
10 */ 265 */
11extern int smu_init(void); 266extern int smu_init(void);
12extern int smu_present(void); 267extern int smu_present(void);
268struct of_device;
269extern struct of_device *smu_get_ofdev(void);
270
271
272/*
273 * Common command wrappers
274 */
13extern void smu_shutdown(void); 275extern void smu_shutdown(void);
14extern void smu_restart(void); 276extern void smu_restart(void);
15extern int smu_get_rtc_time(struct rtc_time *time); 277struct rtc_time;
16extern int smu_set_rtc_time(struct rtc_time *time); 278extern int smu_get_rtc_time(struct rtc_time *time, int spinwait);
279extern int smu_set_rtc_time(struct rtc_time *time, int spinwait);
17 280
18/* 281/*
19 * SMU command buffer absolute address, exported by pmac_setup, 282 * SMU command buffer absolute address, exported by pmac_setup,
20 * this is allocated very early during boot. 283 * this is allocated very early during boot.
21 */ 284 */
22extern unsigned long smu_cmdbuf_abs; 285extern unsigned long smu_cmdbuf_abs;
286
287
288/*
 289 * Kernel asynchronous i2c interface
290 */
291
292/* SMU i2c header, exactly matches i2c header on wire */
293struct smu_i2c_param
294{
295 u8 bus; /* SMU bus ID (from device tree) */
296 u8 type; /* i2c transfer type */
297 u8 devaddr; /* device address (includes direction) */
298 u8 sublen; /* subaddress length */
299 u8 subaddr[3]; /* subaddress */
300 u8 caddr; /* combined address, filled by SMU driver */
301 u8 datalen; /* length of transfer */
302 u8 data[7]; /* data */
303};
304
305#define SMU_I2C_READ_MAX 0x0d
306#define SMU_I2C_WRITE_MAX 0x05
307
308struct smu_i2c_cmd
309{
310 /* public */
311 struct smu_i2c_param info;
312 void (*done)(struct smu_i2c_cmd *cmd, void *misc);
313 void *misc;
314 int status; /* 1 = pending, 0 = ok, <0 = fail */
315
316 /* private */
317 struct smu_cmd scmd;
318 int read;
319 int stage;
320 int retries;
321 u8 pdata[0x10];
322 struct list_head link;
323};
324
325/*
326 * Call this to queue an i2c command to the SMU. You must fill info,
327 * including info.data for a write, done and misc.
328 * For now, no polling interface is provided so you have to use completion
329 * callback.
330 */
331extern int smu_queue_i2c(struct smu_i2c_cmd *cmd);
332
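A hedged example of the asynchronous i2c interface (illustration only, not part of the patch): write one byte to a register using a subaddress transfer and wait for the completion callback. Bus number, device address and register are made-up values; the direction convention follows the i2c comment block earlier in this header, and <linux/completion.h> and <linux/string.h> are assumed available.

static void example_i2c_done(struct smu_i2c_cmd *cmd, void *misc)
{
	complete((struct completion *)misc);
}

static int example_smu_i2c_write_byte(void)
{
	struct smu_i2c_cmd cmd;
	DECLARE_COMPLETION(comp);
	int rc;

	memset(&cmd, 0, sizeof(cmd));
	cmd.info.bus = 2;			/* SMU bus ID from the device tree */
	cmd.info.type = SMU_I2C_TRANSFER_STDSUB;
	cmd.info.devaddr = 0x92;		/* write form of the address (bit 0 clear) */
	cmd.info.sublen = 1;
	cmd.info.subaddr[0] = 0x01;		/* register to write */
	cmd.info.datalen = 1;			/* must not exceed SMU_I2C_WRITE_MAX */
	cmd.info.data[0] = 0x55;
	cmd.done = example_i2c_done;
	cmd.misc = &comp;

	rc = smu_queue_i2c(&cmd);
	if (rc)
		return rc;
	wait_for_completion(&comp);
	return cmd.status;			/* 0 = ok, < 0 = fail */
}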
333
334#endif /* __KERNEL__ */
335
336/*
337 * - Userland interface -
338 */
339
340/*
341 * A given instance of the device can be configured for 2 different
342 * things at the moment:
343 *
344 * - sending SMU commands (default at open() time)
345 * - receiving SMU events (not yet implemented)
346 *
347 * Commands are written with write() of a command block. They can be
348 * "driver" commands (for example to switch to event reception mode)
349 * or real SMU commands. They are made of a header followed by command
350 * data if any.
351 *
352 * For SMU commands (not for driver commands), you can then read() back
353 * a reply. The reader will be blocked or not depending on how the device
354 * file is opened. poll() isn't implemented yet. The reply will consist
355 * of a header as well, followed by the reply data if any. You should
356 * always provide a buffer large enough for the maximum reply data, I
 357 * recommend one page.
358 *
359 * It is illegal to send SMU commands through a file descriptor configured
 360 * for event reception
361 *
362 */
363struct smu_user_cmd_hdr
364{
365 __u32 cmdtype;
366#define SMU_CMDTYPE_SMU 0 /* SMU command */
367#define SMU_CMDTYPE_WANTS_EVENTS 1 /* switch fd to events mode */
368
369 __u8 cmd; /* SMU command byte */
 370	__u32		data_len;	/* Length of data following */
371};
372
373struct smu_user_reply_hdr
374{
375 __u32 status; /* Command status */
376 __u32 reply_len; /* Lenght of data follwing */
377};
378
379#endif /* _SMU_H */
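Finally, a hedged user-space sketch of the command/reply protocol described in the userland-interface comment above. The device node path ("/dev/smu") is an assumption not stated in the header, as is the use of the padded C struct layout on the write; the example assumes this header plus <linux/types.h> are available to user space. The command issued is the VSLEW query (the 5-byte "VSLEW" string) documented with SMU_CMD_POWER_COMMAND.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	unsigned char buf[4096];
	struct smu_user_cmd_hdr hdr;
	struct smu_user_reply_hdr rhdr;
	int fd = open("/dev/smu", O_RDWR);

	if (fd < 0)
		return 1;

	/* Command block: header followed by data_len bytes of command data. */
	memset(&hdr, 0, sizeof(hdr));
	hdr.cmdtype = SMU_CMDTYPE_SMU;
	hdr.cmd = SMU_CMD_POWER_COMMAND;
	hdr.data_len = 5;			/* strlen("VSLEW") */
	memcpy(buf, &hdr, sizeof(hdr));
	memcpy(buf + sizeof(hdr), "VSLEW", 5);
	if (write(fd, buf, sizeof(hdr) + 5) < 0)
		return 1;

	/* Reply: header followed by reply_len bytes; provide a large buffer. */
	if (read(fd, buf, sizeof(buf)) < 0)
		return 1;
	memcpy(&rhdr, buf, sizeof(rhdr));
	printf("status %u, reply_len %u\n",
	       (unsigned int)rhdr.status, (unsigned int)rhdr.reply_len);
	close(fd);
	return 0;
}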
diff --git a/include/asm-ppc64/tlbflush.h b/include/asm-ppc64/tlbflush.h
index 45411a67e082..74271d7c1d16 100644
--- a/include/asm-ppc64/tlbflush.h
+++ b/include/asm-ppc64/tlbflush.h
@@ -25,6 +25,7 @@ struct ppc64_tlb_batch {
25 pte_t pte[PPC64_TLB_BATCH_NR]; 25 pte_t pte[PPC64_TLB_BATCH_NR];
26 unsigned long addr[PPC64_TLB_BATCH_NR]; 26 unsigned long addr[PPC64_TLB_BATCH_NR];
27 unsigned long vaddr[PPC64_TLB_BATCH_NR]; 27 unsigned long vaddr[PPC64_TLB_BATCH_NR];
28 unsigned int large;
28}; 29};
29DECLARE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch); 30DECLARE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);
30 31
diff --git a/include/asm-ppc64/uaccess.h b/include/asm-ppc64/uaccess.h
index c181a60d868c..132c1276547b 100644
--- a/include/asm-ppc64/uaccess.h
+++ b/include/asm-ppc64/uaccess.h
@@ -164,7 +164,8 @@ do { \
164 164
165#define __get_user_nocheck(x,ptr,size) \ 165#define __get_user_nocheck(x,ptr,size) \
166({ \ 166({ \
167 long __gu_err, __gu_val; \ 167 long __gu_err; \
168 unsigned long __gu_val; \
168 might_sleep(); \ 169 might_sleep(); \
169 __get_user_size(__gu_val,(ptr),(size),__gu_err,-EFAULT);\ 170 __get_user_size(__gu_val,(ptr),(size),__gu_err,-EFAULT);\
170 (x) = (__typeof__(*(ptr)))__gu_val; \ 171 (x) = (__typeof__(*(ptr)))__gu_val; \
@@ -173,7 +174,8 @@ do { \
173 174
174#define __get_user_check(x,ptr,size) \ 175#define __get_user_check(x,ptr,size) \
175({ \ 176({ \
176 long __gu_err = -EFAULT, __gu_val = 0; \ 177 long __gu_err = -EFAULT; \
178 unsigned long __gu_val = 0; \
177 const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \ 179 const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
178 might_sleep(); \ 180 might_sleep(); \
179 if (access_ok(VERIFY_READ,__gu_addr,size)) \ 181 if (access_ok(VERIFY_READ,__gu_addr,size)) \
diff --git a/include/asm-s390/sigcontext.h b/include/asm-s390/sigcontext.h
index d57bc0cebdce..803545351dd8 100644
--- a/include/asm-s390/sigcontext.h
+++ b/include/asm-s390/sigcontext.h
@@ -61,7 +61,7 @@ typedef struct
61struct sigcontext 61struct sigcontext
62{ 62{
63 unsigned long oldmask[_SIGCONTEXT_NSIG_WORDS]; 63 unsigned long oldmask[_SIGCONTEXT_NSIG_WORDS];
64 _sigregs *sregs; 64 _sigregs __user *sregs;
65}; 65};
66 66
67 67
diff --git a/include/asm-s390/signal.h b/include/asm-s390/signal.h
index 3d6e11c6c1fd..7084626de215 100644
--- a/include/asm-s390/signal.h
+++ b/include/asm-s390/signal.h
@@ -165,7 +165,7 @@ struct sigaction {
165#endif /* __KERNEL__ */ 165#endif /* __KERNEL__ */
166 166
167typedef struct sigaltstack { 167typedef struct sigaltstack {
168 void *ss_sp; 168 void __user *ss_sp;
169 int ss_flags; 169 int ss_flags;
170 size_t ss_size; 170 size_t ss_size;
171} stack_t; 171} stack_t;
diff --git a/include/asm-sparc/pgtable.h b/include/asm-sparc/pgtable.h
index 8f4f6a959651..8395ad2f1c09 100644
--- a/include/asm-sparc/pgtable.h
+++ b/include/asm-sparc/pgtable.h
@@ -82,6 +82,8 @@ extern unsigned long page_kernel;
82/* Top-level page directory */ 82/* Top-level page directory */
83extern pgd_t swapper_pg_dir[1024]; 83extern pgd_t swapper_pg_dir[1024];
84 84
85extern void paging_init(void);
86
85/* Page table for 0-4MB for everybody, on the Sparc this 87/* Page table for 0-4MB for everybody, on the Sparc this
86 * holds the same as on the i386. 88 * holds the same as on the i386.
87 */ 89 */
diff --git a/include/asm-sparc64/cacheflush.h b/include/asm-sparc64/cacheflush.h
index ededd2659eab..b3f61659ba81 100644
--- a/include/asm-sparc64/cacheflush.h
+++ b/include/asm-sparc64/cacheflush.h
@@ -66,6 +66,11 @@ extern void flush_ptrace_access(struct vm_area_struct *, struct page *,
66#define flush_cache_vmap(start, end) do { } while (0) 66#define flush_cache_vmap(start, end) do { } while (0)
67#define flush_cache_vunmap(start, end) do { } while (0) 67#define flush_cache_vunmap(start, end) do { } while (0)
68 68
69#ifdef CONFIG_DEBUG_PAGEALLOC
70/* internal debugging function */
71void kernel_map_pages(struct page *page, int numpages, int enable);
72#endif
73
69#endif /* !__ASSEMBLY__ */ 74#endif /* !__ASSEMBLY__ */
70 75
71#endif /* _SPARC64_CACHEFLUSH_H */ 76#endif /* _SPARC64_CACHEFLUSH_H */
diff --git a/include/asm-sparc64/cpudata.h b/include/asm-sparc64/cpudata.h
index 9a3a81f1cc58..74de79dca915 100644
--- a/include/asm-sparc64/cpudata.h
+++ b/include/asm-sparc64/cpudata.h
@@ -22,6 +22,16 @@ typedef struct {
22 unsigned int __pad1; 22 unsigned int __pad1;
23 unsigned long *pte_cache[2]; 23 unsigned long *pte_cache[2];
24 unsigned long *pgd_cache; 24 unsigned long *pgd_cache;
25
26 /* Dcache line 3, rarely used */
27 unsigned int dcache_size;
28 unsigned int dcache_line_size;
29 unsigned int icache_size;
30 unsigned int icache_line_size;
31 unsigned int ecache_size;
32 unsigned int ecache_line_size;
33 unsigned int __pad2;
34 unsigned int __pad3;
25} cpuinfo_sparc; 35} cpuinfo_sparc;
26 36
27DECLARE_PER_CPU(cpuinfo_sparc, __cpu_data); 37DECLARE_PER_CPU(cpuinfo_sparc, __cpu_data);
diff --git a/include/asm-sparc64/head.h b/include/asm-sparc64/head.h
index b63a33cf4971..0abd3a674e8f 100644
--- a/include/asm-sparc64/head.h
+++ b/include/asm-sparc64/head.h
@@ -12,9 +12,12 @@
12#define __JALAPENO_ID 0x003e0016 12#define __JALAPENO_ID 0x003e0016
13 13
14#define CHEETAH_MANUF 0x003e 14#define CHEETAH_MANUF 0x003e
15#define CHEETAH_IMPL 0x0014 15#define CHEETAH_IMPL 0x0014 /* Ultra-III */
16#define CHEETAH_PLUS_IMPL 0x0015 16#define CHEETAH_PLUS_IMPL 0x0015 /* Ultra-III+ */
17#define JALAPENO_IMPL 0x0016 17#define JALAPENO_IMPL 0x0016 /* Ultra-IIIi */
18#define JAGUAR_IMPL 0x0018 /* Ultra-IV */
19#define PANTHER_IMPL 0x0019 /* Ultra-IV+ */
20#define SERRANO_IMPL 0x0022 /* Ultra-IIIi+ */
18 21
19#define BRANCH_IF_CHEETAH_BASE(tmp1,tmp2,label) \ 22#define BRANCH_IF_CHEETAH_BASE(tmp1,tmp2,label) \
20 rdpr %ver, %tmp1; \ 23 rdpr %ver, %tmp1; \
diff --git a/include/asm-sparc64/openprom.h b/include/asm-sparc64/openprom.h
index 0a336901d585..b4959d2b0d99 100644
--- a/include/asm-sparc64/openprom.h
+++ b/include/asm-sparc64/openprom.h
@@ -186,8 +186,8 @@ struct linux_prom_registers {
186}; 186};
187 187
188struct linux_prom64_registers { 188struct linux_prom64_registers {
189 long phys_addr; 189 unsigned long phys_addr;
190 long reg_size; 190 unsigned long reg_size;
191}; 191};
192 192
193struct linux_prom_irqs { 193struct linux_prom_irqs {
diff --git a/include/asm-sparc64/oplib.h b/include/asm-sparc64/oplib.h
index a432d9e7daaa..d02f1e8ae1a6 100644
--- a/include/asm-sparc64/oplib.h
+++ b/include/asm-sparc64/oplib.h
@@ -38,6 +38,20 @@ extern int prom_stdin, prom_stdout;
38 */ 38 */
39extern int prom_chosen_node; 39extern int prom_chosen_node;
40 40
41/* Helper values and strings in arch/sparc64/kernel/head.S */
42extern const char prom_finddev_name[];
43extern const char prom_chosen_path[];
44extern const char prom_getprop_name[];
45extern const char prom_mmu_name[];
46extern const char prom_callmethod_name[];
47extern const char prom_translate_name[];
48extern const char prom_map_name[];
49extern const char prom_unmap_name[];
50extern int prom_mmu_ihandle_cache;
51extern unsigned int prom_boot_mapped_pc;
52extern unsigned int prom_boot_mapping_mode;
53extern unsigned long prom_boot_mapping_phys_high, prom_boot_mapping_phys_low;
54
41struct linux_mlist_p1275 { 55struct linux_mlist_p1275 {
42 struct linux_mlist_p1275 *theres_more; 56 struct linux_mlist_p1275 *theres_more;
43 unsigned long start_adr; 57 unsigned long start_adr;
@@ -68,7 +82,7 @@ extern char *prom_getbootargs(void);
68 * of the string is different on V0 vs. V2->higher proms. The caller must 82 * of the string is different on V0 vs. V2->higher proms. The caller must
69 * know what he/she is doing! Returns the device descriptor, an int. 83 * know what he/she is doing! Returns the device descriptor, an int.
70 */ 84 */
71extern int prom_devopen(char *device_string); 85extern int prom_devopen(const char *device_string);
72 86
73/* Close a previously opened device described by the passed integer 87/* Close a previously opened device described by the passed integer
74 * descriptor. 88 * descriptor.
@@ -81,27 +95,13 @@ extern int prom_devclose(int device_handle);
81extern void prom_seek(int device_handle, unsigned int seek_hival, 95extern void prom_seek(int device_handle, unsigned int seek_hival,
82 unsigned int seek_lowval); 96 unsigned int seek_lowval);
83 97
84/* Machine memory configuration routine. */
85
86/* This function returns a V0 format memory descriptor table, it has three
87 * entries. One for the total amount of physical ram on the machine, one
88 * for the amount of physical ram available, and one describing the virtual
89 * areas which are allocated by the prom. So, in a sense the physical
90 * available is a calculation of the total physical minus the physical mapped
91 * by the prom with virtual mappings.
92 *
93 * These lists are returned pre-sorted, this should make your life easier
94 * since the prom itself is way too lazy to do such nice things.
95 */
96extern struct linux_mem_p1275 *prom_meminfo(void);
97
98/* Miscellaneous routines, don't really fit in any category per se. */ 98/* Miscellaneous routines, don't really fit in any category per se. */
99 99
100/* Reboot the machine with the command line passed. */ 100/* Reboot the machine with the command line passed. */
101extern void prom_reboot(char *boot_command); 101extern void prom_reboot(const char *boot_command);
102 102
103/* Evaluate the forth string passed. */ 103/* Evaluate the forth string passed. */
104extern void prom_feval(char *forth_string); 104extern void prom_feval(const char *forth_string);
105 105
106/* Enter the prom, with possibility of continuation with the 'go' 106/* Enter the prom, with possibility of continuation with the 'go'
107 * command in newer proms. 107 * command in newer proms.
@@ -154,7 +154,7 @@ extern char prom_getchar(void);
154extern void prom_putchar(char character); 154extern void prom_putchar(char character);
155 155
156/* Prom's internal routines, don't use in kernel/boot code. */ 156/* Prom's internal routines, don't use in kernel/boot code. */
157extern void prom_printf(char *fmt, ...); 157extern void prom_printf(const char *fmt, ...);
158extern void prom_write(const char *buf, unsigned int len); 158extern void prom_write(const char *buf, unsigned int len);
159 159
160/* Query for input device type */ 160/* Query for input device type */
@@ -215,7 +215,7 @@ extern int prom_getunumber(int syndrome_code,
215 char *buf, int buflen); 215 char *buf, int buflen);
216 216
217/* Retain physical memory to the caller across soft resets. */ 217/* Retain physical memory to the caller across soft resets. */
218extern unsigned long prom_retain(char *name, 218extern unsigned long prom_retain(const char *name,
219 unsigned long pa_low, unsigned long pa_high, 219 unsigned long pa_low, unsigned long pa_high,
220 long size, long align); 220 long size, long align);
221 221
@@ -269,28 +269,28 @@ extern int prom_getsibling(int node);
269/* Get the length, at the passed node, of the given property type. 269/* Get the length, at the passed node, of the given property type.
270 * Returns -1 on error (ie. no such property at this node). 270 * Returns -1 on error (ie. no such property at this node).
271 */ 271 */
272extern int prom_getproplen(int thisnode, char *property); 272extern int prom_getproplen(int thisnode, const char *property);
273 273
274/* Fetch the requested property using the given buffer. Returns 274/* Fetch the requested property using the given buffer. Returns
275 * the number of bytes the prom put into your buffer or -1 on error. 275 * the number of bytes the prom put into your buffer or -1 on error.
276 */ 276 */
277extern int prom_getproperty(int thisnode, char *property, 277extern int prom_getproperty(int thisnode, const char *property,
278 char *prop_buffer, int propbuf_size); 278 char *prop_buffer, int propbuf_size);
279 279
280/* Acquire an integer property. */ 280/* Acquire an integer property. */
281extern int prom_getint(int node, char *property); 281extern int prom_getint(int node, const char *property);
282 282
283/* Acquire an integer property, with a default value. */ 283/* Acquire an integer property, with a default value. */
284extern int prom_getintdefault(int node, char *property, int defval); 284extern int prom_getintdefault(int node, const char *property, int defval);
285 285
286/* Acquire a boolean property, 0=FALSE 1=TRUE. */ 286/* Acquire a boolean property, 0=FALSE 1=TRUE. */
287extern int prom_getbool(int node, char *prop); 287extern int prom_getbool(int node, const char *prop);
288 288
289/* Acquire a string property, null string on error. */ 289/* Acquire a string property, null string on error. */
290extern void prom_getstring(int node, char *prop, char *buf, int bufsize); 290extern void prom_getstring(int node, const char *prop, char *buf, int bufsize);
291 291
292/* Does the passed node have the given "name"? YES=1 NO=0 */ 292/* Does the passed node have the given "name"? YES=1 NO=0 */
293extern int prom_nodematch(int thisnode, char *name); 293extern int prom_nodematch(int thisnode, const char *name);
294 294
295/* Puts in buffer a prom name in the form name@x,y or name (x for which_io 295/* Puts in buffer a prom name in the form name@x,y or name (x for which_io
296 * and y for first regs phys address 296 * and y for first regs phys address
@@ -300,7 +300,7 @@ extern int prom_getname(int node, char *buf, int buflen);
300/* Search all siblings starting at the passed node for "name" matching 300/* Search all siblings starting at the passed node for "name" matching
301 * the given string. Returns the node on success, zero on failure. 301 * the given string. Returns the node on success, zero on failure.
302 */ 302 */
303extern int prom_searchsiblings(int node_start, char *name); 303extern int prom_searchsiblings(int node_start, const char *name);
304 304
305/* Return the first property type, as a string, for the given node. 305/* Return the first property type, as a string, for the given node.
306 * Returns a null string on error. Buffer should be at least 32B long. 306 * Returns a null string on error. Buffer should be at least 32B long.
@@ -310,21 +310,21 @@ extern char *prom_firstprop(int node, char *buffer);
310/* Returns the next property after the passed property for the given 310/* Returns the next property after the passed property for the given
311 * node. Returns null string on failure. Buffer should be at least 32B long. 311 * node. Returns null string on failure. Buffer should be at least 32B long.
312 */ 312 */
313extern char *prom_nextprop(int node, char *prev_property, char *buffer); 313extern char *prom_nextprop(int node, const char *prev_property, char *buffer);
314 314
315/* Returns 1 if the specified node has given property. */ 315/* Returns 1 if the specified node has given property. */
316extern int prom_node_has_property(int node, char *property); 316extern int prom_node_has_property(int node, const char *property);
317 317
318/* Returns phandle of the path specified */ 318/* Returns phandle of the path specified */
319extern int prom_finddevice(char *name); 319extern int prom_finddevice(const char *name);
320 320
321/* Set the indicated property at the given node with the passed value. 321/* Set the indicated property at the given node with the passed value.
322 * Returns the number of bytes of your value that the prom took. 322 * Returns the number of bytes of your value that the prom took.
323 */ 323 */
324extern int prom_setprop(int node, char *prop_name, char *prop_value, 324extern int prom_setprop(int node, const char *prop_name, char *prop_value,
325 int value_size); 325 int value_size);
326 326
327extern int prom_pathtoinode(char *path); 327extern int prom_pathtoinode(const char *path);
328extern int prom_inst2pkg(int); 328extern int prom_inst2pkg(int);
329 329
330/* CPU probing helpers. */ 330/* CPU probing helpers. */
@@ -334,7 +334,7 @@ int cpu_find_by_mid(int mid, int *prom_node);
334/* Client interface level routines. */ 334/* Client interface level routines. */
335extern void prom_set_trap_table(unsigned long tba); 335extern void prom_set_trap_table(unsigned long tba);
336 336
337extern long p1275_cmd (char *, long, ...); 337extern long p1275_cmd(const char *, long, ...);
338 338
339 339
340#if 0 340#if 0
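The hunks above constify the OBP/PROM helper prototypes, so string literals can be passed without casts. A minimal caller sketch, using only the prototypes shown above (prom_finddevice, prom_getproperty); the function name is made up for illustration:

	/* Sketch only: a caller that benefits from the const-qualified prototypes.
	 * "/chosen" and "bootargs" are plain string literals; with the old
	 * char * prototypes passing them needed a cast (or tripped
	 * -Wwrite-strings style warnings). */
	static int copy_bootargs(char *buf, int buflen)
	{
		int node = prom_finddevice("/chosen");	/* const char * now accepted */

		/* returns number of bytes written to buf, or -1 on error */
		return prom_getproperty(node, "bootargs", buf, buflen);
	}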
diff --git a/include/asm-sparc64/page.h b/include/asm-sparc64/page.h
index 7f8d764abc47..5426bb28a993 100644
--- a/include/asm-sparc64/page.h
+++ b/include/asm-sparc64/page.h
@@ -140,23 +140,6 @@ extern unsigned long page_to_pfn(struct page *);
140#define virt_to_phys __pa 140#define virt_to_phys __pa
141#define phys_to_virt __va 141#define phys_to_virt __va
142 142
143/* The following structure is used to hold the physical
144 * memory configuration of the machine. This is filled in
145 * probe_memory() and is later used by mem_init() to set up
146 * mem_map[]. We statically allocate SPARC_PHYS_BANKS of
147 * these structs, this is arbitrary. The entry after the
148 * last valid one has num_bytes==0.
149 */
150
151struct sparc_phys_banks {
152 unsigned long base_addr;
153 unsigned long num_bytes;
154};
155
156#define SPARC_PHYS_BANKS 32
157
158extern struct sparc_phys_banks sp_banks[SPARC_PHYS_BANKS];
159
160#endif /* !(__ASSEMBLY__) */ 143#endif /* !(__ASSEMBLY__) */
161 144
162#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \ 145#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
diff --git a/include/asm-sparc64/pgtable.h b/include/asm-sparc64/pgtable.h
index a297f6144f0f..8c6dfc6c7af6 100644
--- a/include/asm-sparc64/pgtable.h
+++ b/include/asm-sparc64/pgtable.h
@@ -60,13 +60,13 @@
60 * table can map 60 * table can map
61 */ 61 */
62#define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT-3)) 62#define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT-3))
63#define PMD_SIZE (1UL << PMD_SHIFT) 63#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
64#define PMD_MASK (~(PMD_SIZE-1)) 64#define PMD_MASK (~(PMD_SIZE-1))
65#define PMD_BITS (PAGE_SHIFT - 2) 65#define PMD_BITS (PAGE_SHIFT - 2)
66 66
67/* PGDIR_SHIFT determines what a third-level page table entry can map */ 67/* PGDIR_SHIFT determines what a third-level page table entry can map */
68#define PGDIR_SHIFT (PAGE_SHIFT + (PAGE_SHIFT-3) + PMD_BITS) 68#define PGDIR_SHIFT (PAGE_SHIFT + (PAGE_SHIFT-3) + PMD_BITS)
69#define PGDIR_SIZE (1UL << PGDIR_SHIFT) 69#define PGDIR_SIZE (_AC(1,UL) << PGDIR_SHIFT)
70#define PGDIR_MASK (~(PGDIR_SIZE-1)) 70#define PGDIR_MASK (~(PGDIR_SIZE-1))
71#define PGDIR_BITS (PAGE_SHIFT - 2) 71#define PGDIR_BITS (PAGE_SHIFT - 2)
72 72
@@ -98,7 +98,9 @@
98#define _PAGE_NFO _AC(0x1000000000000000,UL) /* No Fault Only */ 98#define _PAGE_NFO _AC(0x1000000000000000,UL) /* No Fault Only */
99#define _PAGE_IE _AC(0x0800000000000000,UL) /* Invert Endianness */ 99#define _PAGE_IE _AC(0x0800000000000000,UL) /* Invert Endianness */
100#define _PAGE_SOFT2 _AC(0x07FC000000000000,UL) /* Software bits, set 2 */ 100#define _PAGE_SOFT2 _AC(0x07FC000000000000,UL) /* Software bits, set 2 */
101#define _PAGE_RES1 _AC(0x0003000000000000,UL) /* Reserved */ 101#define _PAGE_RES1 _AC(0x0002000000000000,UL) /* Reserved */
102#define _PAGE_SZ32MB _AC(0x0001000000000000,UL) /* (Panther) 32MB page */
103#define _PAGE_SZ256MB _AC(0x2001000000000000,UL) /* (Panther) 256MB page */
102#define _PAGE_SN _AC(0x0000800000000000,UL) /* (Cheetah) Snoop */ 104#define _PAGE_SN _AC(0x0000800000000000,UL) /* (Cheetah) Snoop */
103#define _PAGE_RES2 _AC(0x0000780000000000,UL) /* Reserved */ 105#define _PAGE_RES2 _AC(0x0000780000000000,UL) /* Reserved */
104#define _PAGE_PADDR_SF _AC(0x000001FFFFFFE000,UL) /* (Spitfire) paddr[40:13]*/ 106#define _PAGE_PADDR_SF _AC(0x000001FFFFFFE000,UL) /* (Spitfire) paddr[40:13]*/
@@ -336,7 +338,11 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *p
336#define pte_clear(mm,addr,ptep) \ 338#define pte_clear(mm,addr,ptep) \
337 set_pte_at((mm), (addr), (ptep), __pte(0UL)) 339 set_pte_at((mm), (addr), (ptep), __pte(0UL))
338 340
339extern pgd_t swapper_pg_dir[1]; 341extern pgd_t swapper_pg_dir[2048];
342extern pmd_t swapper_low_pmd_dir[2048];
343
344extern void paging_init(void);
345extern unsigned long find_ecache_flush_span(unsigned long size);
340 346
341/* These do nothing with the way I have things setup. */ 347/* These do nothing with the way I have things setup. */
342#define mmu_lockarea(vaddr, len) (vaddr) 348#define mmu_lockarea(vaddr, len) (vaddr)
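The PMD_SIZE/PGDIR_SIZE change swaps a bare 1UL for the _AC() helper so the same header works when included from assembly, where a UL suffix is not valid syntax. A simplified illustration of the idea (not the kernel's exact const header):

	/* Simplified sketch of what _AC() does. */
	#ifdef __ASSEMBLY__
	#define _AC(X, Y)	X		/* assembler: bare constant, no suffix */
	#else
	#define __AC(X, Y)	(X##Y)
	#define _AC(X, Y)	__AC(X, Y)	/* C: paste the UL suffix back on */
	#endif

	#define PMD_SIZE	(_AC(1, UL) << PMD_SHIFT)	/* "1" in .S files, 1UL in C */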
diff --git a/include/asm-sparc64/uaccess.h b/include/asm-sparc64/uaccess.h
index 80a65d7e3dbf..203e8eee6351 100644
--- a/include/asm-sparc64/uaccess.h
+++ b/include/asm-sparc64/uaccess.h
@@ -70,26 +70,14 @@ static inline int access_ok(int type, const void __user * addr, unsigned long si
70 * with the main instruction path. This means when everything is well, 70 * with the main instruction path. This means when everything is well,
71 * we don't even have to jump over them. Further, they do not intrude 71 * we don't even have to jump over them. Further, they do not intrude
72 * on our cache or tlb entries. 72 * on our cache or tlb entries.
73 *
74 * There is a special way how to put a range of potentially faulting
75 * insns (like twenty ldd/std's with now intervening other instructions)
76 * You specify address of first in insn and 0 in fixup and in the next
77 * exception_table_entry you specify last potentially faulting insn + 1
78 * and in fixup the routine which should handle the fault.
79 * That fixup code will get
80 * (faulting_insn_address - first_insn_in_the_range_address)/4
81 * in %g2 (ie. index of the faulting instruction in the range).
82 */ 73 */
83 74
84struct exception_table_entry 75struct exception_table_entry {
85{ 76 unsigned int insn, fixup;
86 unsigned insn, fixup;
87}; 77};
88 78
89/* Special exable search, which handles ranges. Returns fixup */
90unsigned long search_extables_range(unsigned long addr, unsigned long *g2);
91
92extern void __ret_efault(void); 79extern void __ret_efault(void);
80extern void __retl_efault(void);
93 81
94/* Uh, these should become the main single-value transfer routines.. 82/* Uh, these should become the main single-value transfer routines..
95 * They automatically use the right size if we just have the right 83 * They automatically use the right size if we just have the right
@@ -263,7 +251,7 @@ copy_from_user(void *to, const void __user *from, unsigned long size)
263{ 251{
264 unsigned long ret = ___copy_from_user(to, from, size); 252 unsigned long ret = ___copy_from_user(to, from, size);
265 253
266 if (ret) 254 if (unlikely(ret))
267 ret = copy_from_user_fixup(to, from, size); 255 ret = copy_from_user_fixup(to, from, size);
268 return ret; 256 return ret;
269} 257}
@@ -279,7 +267,7 @@ copy_to_user(void __user *to, const void *from, unsigned long size)
279{ 267{
280 unsigned long ret = ___copy_to_user(to, from, size); 268 unsigned long ret = ___copy_to_user(to, from, size);
281 269
282 if (ret) 270 if (unlikely(ret))
283 ret = copy_to_user_fixup(to, from, size); 271 ret = copy_to_user_fixup(to, from, size);
284 return ret; 272 return ret;
285} 273}
@@ -295,7 +283,7 @@ copy_in_user(void __user *to, void __user *from, unsigned long size)
295{ 283{
296 unsigned long ret = ___copy_in_user(to, from, size); 284 unsigned long ret = ___copy_in_user(to, from, size);
297 285
298 if (ret) 286 if (unlikely(ret))
299 ret = copy_in_user_fixup(to, from, size); 287 ret = copy_in_user_fixup(to, from, size);
300 return ret; 288 return ret;
301} 289}
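The copy_{from,to,in}_user hunks wrap the fixup call in unlikely(), telling the compiler that the no-fault path is the common one so it lays the slow path out of line. A hedged sketch of the same pattern; do_fast_path() and do_slow_fixup() are hypothetical stand-ins, not the kernel's helpers:

	/* Sketch of the fast-path/fixup pattern used above. */
	static unsigned long copy_with_fixup(void *to, const void __user *from,
					     unsigned long size)
	{
		unsigned long left = do_fast_path(to, from, size);

		if (unlikely(left))			/* rare: a fault occurred */
			left = do_slow_fixup(to, from, size);
		return left;				/* bytes NOT copied; 0 on success */
	}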
diff --git a/include/asm-um/pgtable.h b/include/asm-um/pgtable.h
index ed06170e0edd..616d02b57ea9 100644
--- a/include/asm-um/pgtable.h
+++ b/include/asm-um/pgtable.h
@@ -346,7 +346,6 @@ static inline void set_pte(pte_t *pteptr, pte_t pteval)
346static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) 346static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
347{ 347{
348 pte_set_val(pte, (pte_val(pte) & _PAGE_CHG_MASK), newprot); 348 pte_set_val(pte, (pte_val(pte) & _PAGE_CHG_MASK), newprot);
349 if(pte_present(pte)) pte = pte_mknewpage(pte_mknewprot(pte));
350 return pte; 349 return pte;
351} 350}
352 351
diff --git a/include/asm-um/uaccess.h b/include/asm-um/uaccess.h
index 801710d00a40..2ee028b8de9d 100644
--- a/include/asm-um/uaccess.h
+++ b/include/asm-um/uaccess.h
@@ -44,7 +44,7 @@
44 const __typeof__(ptr) __private_ptr = ptr; \ 44 const __typeof__(ptr) __private_ptr = ptr; \
45 __typeof__(*(__private_ptr)) __private_val; \ 45 __typeof__(*(__private_ptr)) __private_val; \
46 int __private_ret = -EFAULT; \ 46 int __private_ret = -EFAULT; \
47 (x) = 0; \ 47 (x) = (__typeof__(*(__private_ptr)))0; \
48 if (__copy_from_user(&__private_val, (__private_ptr), \ 48 if (__copy_from_user(&__private_val, (__private_ptr), \
49 sizeof(*(__private_ptr))) == 0) {\ 49 sizeof(*(__private_ptr))) == 0) {\
50 (x) = (__typeof__(*(__private_ptr))) __private_val; \ 50 (x) = (__typeof__(*(__private_ptr))) __private_val; \
diff --git a/include/asm-x86_64/msr.h b/include/asm-x86_64/msr.h
index 4d727f3f5550..5a7fe3c6c3d8 100644
--- a/include/asm-x86_64/msr.h
+++ b/include/asm-x86_64/msr.h
@@ -234,6 +234,7 @@ static inline unsigned int cpuid_edx(unsigned int op)
234#define MSR_K8_TOP_MEM1 0xC001001A 234#define MSR_K8_TOP_MEM1 0xC001001A
235#define MSR_K8_TOP_MEM2 0xC001001D 235#define MSR_K8_TOP_MEM2 0xC001001D
236#define MSR_K8_SYSCFG 0xC0010010 236#define MSR_K8_SYSCFG 0xC0010010
237#define MSR_K8_HWCR 0xC0010015
237 238
238/* K6 MSRs */ 239/* K6 MSRs */
239#define MSR_K6_EFER 0xC0000080 240#define MSR_K6_EFER 0xC0000080
diff --git a/include/asm-x86_64/pgtable.h b/include/asm-x86_64/pgtable.h
index 2cb483516459..dd8711ecaf2f 100644
--- a/include/asm-x86_64/pgtable.h
+++ b/include/asm-x86_64/pgtable.h
@@ -384,7 +384,7 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
384} 384}
385 385
386#define pte_index(address) \ 386#define pte_index(address) \
387 ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) 387 (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
388#define pte_offset_kernel(dir, address) ((pte_t *) pmd_page_kernel(*(dir)) + \ 388#define pte_offset_kernel(dir, address) ((pte_t *) pmd_page_kernel(*(dir)) + \
389 pte_index(address)) 389 pte_index(address))
390 390
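The pte_index() change only adds parentheses around the macro argument, but that matters whenever the argument is an expression built from an operator with lower precedence than the shift. A small worked illustration (vaddr and PAGE_MASK used purely as an example):

	/* '>>' binds tighter than '&', so with the old macro an argument such as
	 * 'vaddr & PAGE_MASK' shifted only PAGE_MASK:
	 *
	 *   old:  ((vaddr & PAGE_MASK >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
	 *            == ((vaddr & (PAGE_MASK >> PAGE_SHIFT)) & ...)     -- wrong
	 *   new:  (((vaddr & PAGE_MASK) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))  -- intended
	 */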
diff --git a/include/asm-xtensa/atomic.h b/include/asm-xtensa/atomic.h
index 24f86f0e43cf..12b5732dc6e5 100644
--- a/include/asm-xtensa/atomic.h
+++ b/include/asm-xtensa/atomic.h
@@ -22,7 +22,7 @@ typedef struct { volatile int counter; } atomic_t;
22#include <asm/processor.h> 22#include <asm/processor.h>
23#include <asm/system.h> 23#include <asm/system.h>
24 24
25#define ATOMIC_INIT(i) ( (atomic_t) { (i) } ) 25#define ATOMIC_INIT(i) { (i) }
26 26
27/* 27/*
28 * This Xtensa implementation assumes that the right mechanism 28 * This Xtensa implementation assumes that the right mechanism
diff --git a/include/asm-xtensa/bitops.h b/include/asm-xtensa/bitops.h
index d395ef226c32..e76ee889e21d 100644
--- a/include/asm-xtensa/bitops.h
+++ b/include/asm-xtensa/bitops.h
@@ -174,7 +174,7 @@ static __inline__ int test_bit(int nr, const volatile void *addr)
174 return 1UL & (((const volatile unsigned int *)addr)[nr>>5] >> (nr&31)); 174 return 1UL & (((const volatile unsigned int *)addr)[nr>>5] >> (nr&31));
175} 175}
176 176
177#if XCHAL_HAVE_NSAU 177#if XCHAL_HAVE_NSA
178 178
179static __inline__ int __cntlz (unsigned long x) 179static __inline__ int __cntlz (unsigned long x)
180{ 180{
diff --git a/include/asm-xtensa/hardirq.h b/include/asm-xtensa/hardirq.h
index e07c76c36b95..aa9c1adf68d7 100644
--- a/include/asm-xtensa/hardirq.h
+++ b/include/asm-xtensa/hardirq.h
@@ -23,6 +23,7 @@ typedef struct {
23 unsigned int __nmi_count; /* arch dependent */ 23 unsigned int __nmi_count; /* arch dependent */
24} ____cacheline_aligned irq_cpustat_t; 24} ____cacheline_aligned irq_cpustat_t;
25 25
26void ack_bad_irq(unsigned int irq);
26#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */ 27#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
27 28
28#endif /* _XTENSA_HARDIRQ_H */ 29#endif /* _XTENSA_HARDIRQ_H */
diff --git a/include/asm-xtensa/semaphore.h b/include/asm-xtensa/semaphore.h
index db740b8bc6f0..09e89ab3eb61 100644
--- a/include/asm-xtensa/semaphore.h
+++ b/include/asm-xtensa/semaphore.h
@@ -20,28 +20,19 @@ struct semaphore {
20 atomic_t count; 20 atomic_t count;
21 int sleepers; 21 int sleepers;
22 wait_queue_head_t wait; 22 wait_queue_head_t wait;
23#if WAITQUEUE_DEBUG
24 long __magic;
25#endif
26}; 23};
27 24
28#if WAITQUEUE_DEBUG 25#define __SEMAPHORE_INITIALIZER(name,n) \
29# define __SEM_DEBUG_INIT(name) \ 26{ \
30 , (int)&(name).__magic 27 .count = ATOMIC_INIT(n), \
31#else 28 .sleepers = 0, \
32# define __SEM_DEBUG_INIT(name) 29 .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
33#endif 30}
34
35#define __SEMAPHORE_INITIALIZER(name,count) \
36 { ATOMIC_INIT(count), \
37 0, \
38 __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
39 __SEM_DEBUG_INIT(name) }
40 31
41#define __MUTEX_INITIALIZER(name) \ 32#define __MUTEX_INITIALIZER(name) \
42 __SEMAPHORE_INITIALIZER(name, 1) 33 __SEMAPHORE_INITIALIZER(name, 1)
43 34
44#define __DECLARE_SEMAPHORE_GENERIC(name,count) \ 35#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
45 struct semaphore name = __SEMAPHORE_INITIALIZER(name,count) 36 struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
46 37
47#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1) 38#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
@@ -49,17 +40,8 @@ struct semaphore {
49 40
50static inline void sema_init (struct semaphore *sem, int val) 41static inline void sema_init (struct semaphore *sem, int val)
51{ 42{
52/*
53 * *sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val);
54 *
55 * i'd rather use the more flexible initialization above, but sadly
56 * GCC 2.7.2.3 emits a bogus warning. EGCS doesnt. Oh well.
57 */
58 atomic_set(&sem->count, val); 43 atomic_set(&sem->count, val);
59 init_waitqueue_head(&sem->wait); 44 init_waitqueue_head(&sem->wait);
60#if WAITQUEUE_DEBUG
61 sem->__magic = (int)&sem->__magic;
62#endif
63} 45}
64 46
65static inline void init_MUTEX (struct semaphore *sem) 47static inline void init_MUTEX (struct semaphore *sem)
@@ -81,9 +63,7 @@ extern spinlock_t semaphore_wake_lock;
81 63
82static inline void down(struct semaphore * sem) 64static inline void down(struct semaphore * sem)
83{ 65{
84#if WAITQUEUE_DEBUG 66 might_sleep();
85 CHECK_MAGIC(sem->__magic);
86#endif
87 67
88 if (atomic_sub_return(1, &sem->count) < 0) 68 if (atomic_sub_return(1, &sem->count) < 0)
89 __down(sem); 69 __down(sem);
@@ -92,9 +72,8 @@ static inline void down(struct semaphore * sem)
92static inline int down_interruptible(struct semaphore * sem) 72static inline int down_interruptible(struct semaphore * sem)
93{ 73{
94 int ret = 0; 74 int ret = 0;
95#if WAITQUEUE_DEBUG 75
96 CHECK_MAGIC(sem->__magic); 76 might_sleep();
97#endif
98 77
99 if (atomic_sub_return(1, &sem->count) < 0) 78 if (atomic_sub_return(1, &sem->count) < 0)
100 ret = __down_interruptible(sem); 79 ret = __down_interruptible(sem);
@@ -104,9 +83,6 @@ static inline int down_interruptible(struct semaphore * sem)
104static inline int down_trylock(struct semaphore * sem) 83static inline int down_trylock(struct semaphore * sem)
105{ 84{
106 int ret = 0; 85 int ret = 0;
107#if WAITQUEUE_DEBUG
108 CHECK_MAGIC(sem->__magic);
109#endif
110 86
111 if (atomic_sub_return(1, &sem->count) < 0) 87 if (atomic_sub_return(1, &sem->count) < 0)
112 ret = __down_trylock(sem); 88 ret = __down_trylock(sem);
@@ -119,9 +95,6 @@ static inline int down_trylock(struct semaphore * sem)
119 */ 95 */
120static inline void up(struct semaphore * sem) 96static inline void up(struct semaphore * sem)
121{ 97{
122#if WAITQUEUE_DEBUG
123 CHECK_MAGIC(sem->__magic);
124#endif
125 if (atomic_add_return(1, &sem->count) <= 0) 98 if (atomic_add_return(1, &sem->count) <= 0)
126 __up(sem); 99 __up(sem);
127} 100}
diff --git a/include/asm-xtensa/system.h b/include/asm-xtensa/system.h
index f09393232e5e..9284867f1cb9 100644
--- a/include/asm-xtensa/system.h
+++ b/include/asm-xtensa/system.h
@@ -189,20 +189,6 @@ static inline unsigned long xchg_u32(volatile int * m, unsigned long val)
189 189
190#define tas(ptr) (xchg((ptr),1)) 190#define tas(ptr) (xchg((ptr),1))
191 191
192#if ( __XCC__ == 1 )
193
194/* xt-xcc processes __inline__ differently than xt-gcc and decides to
195 * insert an out-of-line copy of function __xchg. This presents the
196 * unresolved symbol at link time of __xchg_called_with_bad_pointer,
197 * even though such a function would never be called at run-time.
198 * xt-gcc always inlines __xchg, and optimizes away the undefined
199 * bad_pointer function.
200 */
201
202#define xchg(ptr,x) xchg_u32(ptr,x)
203
204#else /* assume xt-gcc */
205
206#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)))) 192#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
207 193
208/* 194/*
@@ -224,8 +210,6 @@ __xchg(unsigned long x, volatile void * ptr, int size)
224 return x; 210 return x;
225} 211}
226 212
227#endif
228
229extern void set_except_vector(int n, void *addr); 213extern void set_except_vector(int n, void *addr);
230 214
231static inline void spill_registers(void) 215static inline void spill_registers(void)
diff --git a/include/linux/aio.h b/include/linux/aio.h
index a4d5af907f90..60def658b246 100644
--- a/include/linux/aio.h
+++ b/include/linux/aio.h
@@ -43,6 +43,40 @@ struct kioctx;
43#define kiocbIsKicked(iocb) test_bit(KIF_KICKED, &(iocb)->ki_flags) 43#define kiocbIsKicked(iocb) test_bit(KIF_KICKED, &(iocb)->ki_flags)
44#define kiocbIsCancelled(iocb) test_bit(KIF_CANCELLED, &(iocb)->ki_flags) 44#define kiocbIsCancelled(iocb) test_bit(KIF_CANCELLED, &(iocb)->ki_flags)
45 45
46/* is there a better place to document function pointer methods? */
47/**
48 * ki_retry - iocb forward progress callback
49 * @kiocb: The kiocb struct to advance by performing an operation.
50 *
51 * This callback is called when the AIO core wants a given AIO operation
52 * to make forward progress. The kiocb argument describes the operation
53 * that is to be performed. As the operation proceeds, perhaps partially,
54 * ki_retry is expected to update the kiocb with progress made. Typically
55 * ki_retry is set in the AIO core and it itself calls file_operations
56 * helpers.
57 *
58 * ki_retry's return value determines when the AIO operation is completed
59 * and an event is generated in the AIO event ring. Except the special
60 * return values described below, the value that is returned from ki_retry
61 * is transferred directly into the completion ring as the operation's
62 * resulting status. Once this has happened ki_retry *MUST NOT* reference
63 * the kiocb pointer again.
64 *
65 * If ki_retry returns -EIOCBQUEUED it has made a promise that aio_complete()
66 * will be called on the kiocb pointer in the future. The AIO core will
67 * not ask the method again -- ki_retry must ensure forward progress.
68 * aio_complete() must be called once and only once in the future, multiple
69 * calls may result in undefined behaviour.
70 *
71 * If ki_retry returns -EIOCBRETRY it has made a promise that kick_iocb()
72 * will be called on the kiocb pointer in the future. This may happen
73 * through generic helpers that associate kiocb->ki_wait with a wait
74 * queue head that ki_retry uses via current->io_wait. It can also happen
75 * with custom tracking and manual calls to kick_iocb(), though that is
76 * discouraged. In either case, kick_iocb() must be called once and only
77 * once. ki_retry must ensure forward progress, the AIO core will wait
78 * indefinitely for kick_iocb() to be called.
79 */
46struct kiocb { 80struct kiocb {
47 struct list_head ki_run_list; 81 struct list_head ki_run_list;
48 long ki_flags; 82 long ki_flags;
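The new kerneldoc block spells out the ki_retry contract: an ordinary return value completes the iocb and lands in the event ring, -EIOCBQUEUED promises a later aio_complete(), and -EIOCBRETRY promises a later kick_iocb(). A schematic retry method honouring that contract; the ssize_t (*)(struct kiocb *) shape and the helper are assumptions for illustration, only the return-code rules come from the text above:

	static ssize_t example_ki_retry(struct kiocb *iocb)
	{
		ssize_t done = example_try_progress(iocb);	/* hypothetical helper */

		if (done == -EIOCBRETRY) {
			/* Promise: kick_iocb(iocb) will be called exactly once later
			 * (e.g. via the ki_wait / current->io_wait plumbing). */
			return -EIOCBRETRY;
		}
		if (done == -EIOCBQUEUED) {
			/* Promise: aio_complete(iocb, ...) will be called exactly once
			 * later; the kiocb must not be touched again here. */
			return -EIOCBQUEUED;
		}
		/* Anything else is final and is copied into the completion ring. */
		return done;
	}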
diff --git a/include/linux/connector.h b/include/linux/connector.h
index 96de26301f84..86d4b0a81713 100644
--- a/include/linux/connector.h
+++ b/include/linux/connector.h
@@ -104,12 +104,19 @@ struct cn_queue_dev {
104 struct sock *nls; 104 struct sock *nls;
105}; 105};
106 106
107struct cn_callback { 107struct cn_callback_id {
108 unsigned char name[CN_CBQ_NAMELEN]; 108 unsigned char name[CN_CBQ_NAMELEN];
109
110 struct cb_id id; 109 struct cb_id id;
110};
111
112struct cn_callback_data {
113 void (*destruct_data) (void *);
114 void *ddata;
115
116 void *callback_priv;
111 void (*callback) (void *); 117 void (*callback) (void *);
112 void *priv; 118
119 void *free;
113}; 120};
114 121
115struct cn_callback_entry { 122struct cn_callback_entry {
@@ -118,8 +125,8 @@ struct cn_callback_entry {
118 struct work_struct work; 125 struct work_struct work;
119 struct cn_queue_dev *pdev; 126 struct cn_queue_dev *pdev;
120 127
121 void (*destruct_data) (void *); 128 struct cn_callback_id id;
122 void *ddata; 129 struct cn_callback_data data;
123 130
124 int seq, group; 131 int seq, group;
125 struct sock *nls; 132 struct sock *nls;
@@ -144,7 +151,7 @@ int cn_add_callback(struct cb_id *, char *, void (*callback) (void *));
144void cn_del_callback(struct cb_id *); 151void cn_del_callback(struct cb_id *);
145int cn_netlink_send(struct cn_msg *, u32, int); 152int cn_netlink_send(struct cn_msg *, u32, int);
146 153
147int cn_queue_add_callback(struct cn_queue_dev *dev, struct cn_callback *cb); 154int cn_queue_add_callback(struct cn_queue_dev *dev, char *name, struct cb_id *id, void (*callback)(void *));
148void cn_queue_del_callback(struct cn_queue_dev *dev, struct cb_id *id); 155void cn_queue_del_callback(struct cn_queue_dev *dev, struct cb_id *id);
149 156
150struct cn_queue_dev *cn_queue_alloc_dev(char *name, struct sock *); 157struct cn_queue_dev *cn_queue_alloc_dev(char *name, struct sock *);
@@ -152,6 +159,8 @@ void cn_queue_free_dev(struct cn_queue_dev *dev);
152 159
153int cn_cb_equal(struct cb_id *, struct cb_id *); 160int cn_cb_equal(struct cb_id *, struct cb_id *);
154 161
162void cn_queue_wrapper(void *data);
163
155extern int cn_already_initialized; 164extern int cn_already_initialized;
156 165
157#endif /* __KERNEL__ */ 166#endif /* __KERNEL__ */
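The connector hunk splits the old struct cn_callback into cn_callback_id (name plus cb_id) and cn_callback_data (callback pointer and destruction hooks), and cn_queue_add_callback() now takes the pieces directly rather than a preassembled struct. A hedged registration sketch based only on the cn_add_callback() prototype shown above; the id values, names, and handler are invented for illustration:

	static void my_callback(void *msg)
	{
		/* handle the received cn_msg here */
	}

	static struct cb_id my_cb_id = { .idx = 0x123, .val = 0x456 };

	static int my_connector_init(void)
	{
		/* int cn_add_callback(struct cb_id *, char *, void (*)(void *)) */
		return cn_add_callback(&my_cb_id, "my_cn_example", my_callback);
	}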
diff --git a/include/linux/if_ether.h b/include/linux/if_ether.h
index fc2d4c8225aa..d21c305c6c64 100644
--- a/include/linux/if_ether.h
+++ b/include/linux/if_ether.h
@@ -111,7 +111,9 @@ static inline struct ethhdr *eth_hdr(const struct sk_buff *skb)
111 return (struct ethhdr *)skb->mac.raw; 111 return (struct ethhdr *)skb->mac.raw;
112} 112}
113 113
114#ifdef CONFIG_SYSCTL
114extern struct ctl_table ether_table[]; 115extern struct ctl_table ether_table[];
115#endif 116#endif
117#endif
116 118
117#endif /* _LINUX_IF_ETHER_H */ 119#endif /* _LINUX_IF_ETHER_H */
diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h
index 7e1e15f934f3..fd7af86151b1 100644
--- a/include/linux/inetdevice.h
+++ b/include/linux/inetdevice.h
@@ -142,13 +142,21 @@ static __inline__ int bad_mask(u32 mask, u32 addr)
142 142
143#define endfor_ifa(in_dev) } 143#define endfor_ifa(in_dev) }
144 144
145static inline struct in_device *__in_dev_get_rcu(const struct net_device *dev)
146{
147 struct in_device *in_dev = dev->ip_ptr;
148 if (in_dev)
149 in_dev = rcu_dereference(in_dev);
150 return in_dev;
151}
152
145static __inline__ struct in_device * 153static __inline__ struct in_device *
146in_dev_get(const struct net_device *dev) 154in_dev_get(const struct net_device *dev)
147{ 155{
148 struct in_device *in_dev; 156 struct in_device *in_dev;
149 157
150 rcu_read_lock(); 158 rcu_read_lock();
151 in_dev = dev->ip_ptr; 159 in_dev = __in_dev_get_rcu(dev);
152 if (in_dev) 160 if (in_dev)
153 atomic_inc(&in_dev->refcnt); 161 atomic_inc(&in_dev->refcnt);
154 rcu_read_unlock(); 162 rcu_read_unlock();
@@ -156,7 +164,7 @@ in_dev_get(const struct net_device *dev)
156} 164}
157 165
158static __inline__ struct in_device * 166static __inline__ struct in_device *
159__in_dev_get(const struct net_device *dev) 167__in_dev_get_rtnl(const struct net_device *dev)
160{ 168{
161 return (struct in_device*)dev->ip_ptr; 169 return (struct in_device*)dev->ip_ptr;
162} 170}
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index bb6f88e14061..e0b922785d98 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -372,8 +372,9 @@ static inline struct raw6_sock *raw6_sk(const struct sock *sk)
372#define inet_v6_ipv6only(__sk) 0 372#define inet_v6_ipv6only(__sk) 0
373#endif /* defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */ 373#endif /* defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */
374 374
375#define INET6_MATCH(__sk, __saddr, __daddr, __ports, __dif) \ 375#define INET6_MATCH(__sk, __hash, __saddr, __daddr, __ports, __dif)\
376 (((*((__u32 *)&(inet_sk(__sk)->dport))) == (__ports)) && \ 376 (((__sk)->sk_hash == (__hash)) && \
377 ((*((__u32 *)&(inet_sk(__sk)->dport))) == (__ports)) && \
377 ((__sk)->sk_family == AF_INET6) && \ 378 ((__sk)->sk_family == AF_INET6) && \
378 ipv6_addr_equal(&inet6_sk(__sk)->daddr, (__saddr)) && \ 379 ipv6_addr_equal(&inet6_sk(__sk)->daddr, (__saddr)) && \
379 ipv6_addr_equal(&inet6_sk(__sk)->rcv_saddr, (__daddr)) && \ 380 ipv6_addr_equal(&inet6_sk(__sk)->rcv_saddr, (__daddr)) && \
diff --git a/include/linux/key-ui.h b/include/linux/key-ui.h
index cc326174a808..918c34a8347e 100644
--- a/include/linux/key-ui.h
+++ b/include/linux/key-ui.h
@@ -42,11 +42,14 @@ struct keyring_list {
42/* 42/*
43 * check to see whether permission is granted to use a key in the desired way 43 * check to see whether permission is granted to use a key in the desired way
44 */ 44 */
45static inline int key_permission(const struct key *key, key_perm_t perm) 45static inline int key_permission(const key_ref_t key_ref, key_perm_t perm)
46{ 46{
47 struct key *key = key_ref_to_ptr(key_ref);
47 key_perm_t kperm; 48 key_perm_t kperm;
48 49
49 if (key->uid == current->fsuid) 50 if (is_key_possessed(key_ref))
51 kperm = key->perm >> 24;
52 else if (key->uid == current->fsuid)
50 kperm = key->perm >> 16; 53 kperm = key->perm >> 16;
51 else if (key->gid != -1 && 54 else if (key->gid != -1 &&
52 key->perm & KEY_GRP_ALL && 55 key->perm & KEY_GRP_ALL &&
@@ -65,11 +68,14 @@ static inline int key_permission(const struct key *key, key_perm_t perm)
65 * check to see whether permission is granted to use a key in at least one of 68 * check to see whether permission is granted to use a key in at least one of
66 * the desired ways 69 * the desired ways
67 */ 70 */
68static inline int key_any_permission(const struct key *key, key_perm_t perm) 71static inline int key_any_permission(const key_ref_t key_ref, key_perm_t perm)
69{ 72{
73 struct key *key = key_ref_to_ptr(key_ref);
70 key_perm_t kperm; 74 key_perm_t kperm;
71 75
72 if (key->uid == current->fsuid) 76 if (is_key_possessed(key_ref))
77 kperm = key->perm >> 24;
78 else if (key->uid == current->fsuid)
73 kperm = key->perm >> 16; 79 kperm = key->perm >> 16;
74 else if (key->gid != -1 && 80 else if (key->gid != -1 &&
75 key->perm & KEY_GRP_ALL && 81 key->perm & KEY_GRP_ALL &&
@@ -94,13 +100,17 @@ static inline int key_task_groups_search(struct task_struct *tsk, gid_t gid)
94 return ret; 100 return ret;
95} 101}
96 102
97static inline int key_task_permission(const struct key *key, 103static inline int key_task_permission(const key_ref_t key_ref,
98 struct task_struct *context, 104 struct task_struct *context,
99 key_perm_t perm) 105 key_perm_t perm)
100{ 106{
107 struct key *key = key_ref_to_ptr(key_ref);
101 key_perm_t kperm; 108 key_perm_t kperm;
102 109
103 if (key->uid == context->fsuid) { 110 if (is_key_possessed(key_ref)) {
111 kperm = key->perm >> 24;
112 }
113 else if (key->uid == context->fsuid) {
104 kperm = key->perm >> 16; 114 kperm = key->perm >> 16;
105 } 115 }
106 else if (key->gid != -1 && 116 else if (key->gid != -1 &&
@@ -121,9 +131,9 @@ static inline int key_task_permission(const struct key *key,
121 131
122} 132}
123 133
124extern struct key *lookup_user_key(struct task_struct *context, 134extern key_ref_t lookup_user_key(struct task_struct *context,
125 key_serial_t id, int create, int partial, 135 key_serial_t id, int create, int partial,
126 key_perm_t perm); 136 key_perm_t perm);
127 137
128extern long join_session_keyring(const char *name); 138extern long join_session_keyring(const char *name);
129 139
diff --git a/include/linux/key.h b/include/linux/key.h
index 970bbd916cf4..f1efa016dbf3 100644
--- a/include/linux/key.h
+++ b/include/linux/key.h
@@ -35,11 +35,18 @@ struct key;
35 35
36#undef KEY_DEBUGGING 36#undef KEY_DEBUGGING
37 37
38#define KEY_USR_VIEW 0x00010000 /* user can view a key's attributes */ 38#define KEY_POS_VIEW 0x01000000 /* possessor can view a key's attributes */
39#define KEY_USR_READ 0x00020000 /* user can read key payload / view keyring */ 39#define KEY_POS_READ 0x02000000 /* possessor can read key payload / view keyring */
40#define KEY_USR_WRITE 0x00040000 /* user can update key payload / add link to keyring */ 40#define KEY_POS_WRITE 0x04000000 /* possessor can update key payload / add link to keyring */
41#define KEY_USR_SEARCH 0x00080000 /* user can find a key in search / search a keyring */ 41#define KEY_POS_SEARCH 0x08000000 /* possessor can find a key in search / search a keyring */
42#define KEY_USR_LINK 0x00100000 /* user can create a link to a key/keyring */ 42#define KEY_POS_LINK 0x10000000 /* possessor can create a link to a key/keyring */
43#define KEY_POS_ALL 0x1f000000
44
45#define KEY_USR_VIEW 0x00010000 /* user permissions... */
46#define KEY_USR_READ 0x00020000
47#define KEY_USR_WRITE 0x00040000
48#define KEY_USR_SEARCH 0x00080000
49#define KEY_USR_LINK 0x00100000
43#define KEY_USR_ALL 0x001f0000 50#define KEY_USR_ALL 0x001f0000
44 51
45#define KEY_GRP_VIEW 0x00000100 /* group permissions... */ 52#define KEY_GRP_VIEW 0x00000100 /* group permissions... */
@@ -67,6 +74,38 @@ struct keyring_name;
67 74
68/*****************************************************************************/ 75/*****************************************************************************/
69/* 76/*
77 * key reference with possession attribute handling
78 *
79 * NOTE! key_ref_t is a typedef'd pointer to a type that is not actually
80 * defined. This is because we abuse the bottom bit of the reference to carry a
81 * flag to indicate whether the calling process possesses that key in one of
82 * its keyrings.
83 *
84 * the key_ref_t has been made a separate type so that the compiler can reject
85 * attempts to dereference it without proper conversion.
86 *
87 * the three functions are used to assemble and disassemble references
88 */
89typedef struct __key_reference_with_attributes *key_ref_t;
90
91static inline key_ref_t make_key_ref(const struct key *key,
92 unsigned long possession)
93{
94 return (key_ref_t) ((unsigned long) key | possession);
95}
96
97static inline struct key *key_ref_to_ptr(const key_ref_t key_ref)
98{
99 return (struct key *) ((unsigned long) key_ref & ~1UL);
100}
101
102static inline unsigned long is_key_possessed(const key_ref_t key_ref)
103{
104 return (unsigned long) key_ref & 1UL;
105}
106
107/*****************************************************************************/
108/*
70 * authentication token / access credential / keyring 109 * authentication token / access credential / keyring
71 * - types of key include: 110 * - types of key include:
72 * - keyrings 111 * - keyrings
@@ -215,20 +254,25 @@ static inline struct key *key_get(struct key *key)
215 return key; 254 return key;
216} 255}
217 256
257static inline void key_ref_put(key_ref_t key_ref)
258{
259 key_put(key_ref_to_ptr(key_ref));
260}
261
218extern struct key *request_key(struct key_type *type, 262extern struct key *request_key(struct key_type *type,
219 const char *description, 263 const char *description,
220 const char *callout_info); 264 const char *callout_info);
221 265
222extern int key_validate(struct key *key); 266extern int key_validate(struct key *key);
223 267
224extern struct key *key_create_or_update(struct key *keyring, 268extern key_ref_t key_create_or_update(key_ref_t keyring,
225 const char *type, 269 const char *type,
226 const char *description, 270 const char *description,
227 const void *payload, 271 const void *payload,
228 size_t plen, 272 size_t plen,
229 int not_in_quota); 273 int not_in_quota);
230 274
231extern int key_update(struct key *key, 275extern int key_update(key_ref_t key,
232 const void *payload, 276 const void *payload,
233 size_t plen); 277 size_t plen);
234 278
@@ -243,9 +287,9 @@ extern struct key *keyring_alloc(const char *description, uid_t uid, gid_t gid,
243 287
244extern int keyring_clear(struct key *keyring); 288extern int keyring_clear(struct key *keyring);
245 289
246extern struct key *keyring_search(struct key *keyring, 290extern key_ref_t keyring_search(key_ref_t keyring,
247 struct key_type *type, 291 struct key_type *type,
248 const char *description); 292 const char *description);
249 293
250extern int keyring_add_key(struct key *keyring, 294extern int keyring_add_key(struct key *keyring,
251 struct key *key); 295 struct key *key);
@@ -285,6 +329,10 @@ extern void key_init(void);
285#define key_serial(k) 0 329#define key_serial(k) 0
286#define key_get(k) ({ NULL; }) 330#define key_get(k) ({ NULL; })
287#define key_put(k) do { } while(0) 331#define key_put(k) do { } while(0)
332#define key_ref_put(k) do { } while(0)
333#define make_key_ref(k) ({ NULL; })
334#define key_ref_to_ptr(k) ({ NULL; })
335#define is_key_possessed(k) 0
288#define alloc_uid_keyring(u) 0 336#define alloc_uid_keyring(u) 0
289#define switch_uid_keyring(u) do { } while(0) 337#define switch_uid_keyring(u) do { } while(0)
290#define __install_session_keyring(t, k) ({ NULL; }) 338#define __install_session_keyring(t, k) ({ NULL; })
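As the comment block above explains, key_ref_t packs "which key" and "does the caller possess it" into one word by stealing the pointer's bottom bit (struct key is word-aligned, so bit 0 is always free). A short sketch exercising the three accessors defined above; the function is illustrative only and assumes the caller already holds a valid key:

	static void key_ref_example(struct key *key)
	{
		key_ref_t possessed_ref, plain_ref;
		struct key *k;

		possessed_ref = make_key_ref(key, 1UL);	/* bit 0 set: possessed */
		plain_ref     = make_key_ref(key, 0UL);	/* bit 0 clear */

		k = key_ref_to_ptr(possessed_ref);	/* masks bit 0 off: k == key */
		k = key_ref_to_ptr(plain_ref);		/* same underlying key */

		/* is_key_possessed(possessed_ref) == 1, is_key_possessed(plain_ref) == 0 */
		(void)k;
	}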
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 022105c745fc..ceee1fc42c60 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -393,6 +393,7 @@ extern int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_i
393extern void ata_pci_remove_one (struct pci_dev *pdev); 393extern void ata_pci_remove_one (struct pci_dev *pdev);
394#endif /* CONFIG_PCI */ 394#endif /* CONFIG_PCI */
395extern int ata_device_add(struct ata_probe_ent *ent); 395extern int ata_device_add(struct ata_probe_ent *ent);
396extern void ata_host_set_remove(struct ata_host_set *host_set);
396extern int ata_scsi_detect(Scsi_Host_Template *sht); 397extern int ata_scsi_detect(Scsi_Host_Template *sht);
397extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg); 398extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
398extern int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)); 399extern int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *));
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index 47da39ba3f03..2f0299a448f6 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -183,7 +183,7 @@ struct of_device_id
183 char name[32]; 183 char name[32];
184 char type[32]; 184 char type[32];
185 char compatible[128]; 185 char compatible[128];
186#if __KERNEL__ 186#ifdef __KERNEL__
187 void *data; 187 void *data;
188#else 188#else
189 kernel_ulong_t data; 189 kernel_ulong_t data;
@@ -209,10 +209,11 @@ struct pcmcia_device_id {
209 /* for real multi-function devices */ 209 /* for real multi-function devices */
210 __u8 function; 210 __u8 function;
211 211
212 /* for pseude multi-function devices */ 212 /* for pseudo multi-function devices */
213 __u8 device_no; 213 __u8 device_no;
214 214
215 __u32 prod_id_hash[4]; 215 __u32 prod_id_hash[4]
216 __attribute__((aligned(sizeof(__u32))));
216 217
217 /* not matched against in kernelspace*/ 218 /* not matched against in kernelspace*/
218#ifdef __KERNEL__ 219#ifdef __KERNEL__
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 98c98e6cd4f3..a9281b24c40b 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -265,6 +265,8 @@ struct net_device
265 * the interface. 265 * the interface.
266 */ 266 */
267 char name[IFNAMSIZ]; 267 char name[IFNAMSIZ];
268 /* device name hash chain */
269 struct hlist_node name_hlist;
268 270
269 /* 271 /*
270 * I/O specific fields 272 * I/O specific fields
@@ -292,6 +294,21 @@ struct net_device
292 294
293 /* ------- Fields preinitialized in Space.c finish here ------- */ 295 /* ------- Fields preinitialized in Space.c finish here ------- */
294 296
297 /* Net device features */
298 unsigned long features;
299#define NETIF_F_SG 1 /* Scatter/gather IO. */
300#define NETIF_F_IP_CSUM 2 /* Can checksum only TCP/UDP over IPv4. */
301#define NETIF_F_NO_CSUM 4 /* Does not require checksum. F.e. loopack. */
302#define NETIF_F_HW_CSUM 8 /* Can checksum all the packets. */
303#define NETIF_F_HIGHDMA 32 /* Can DMA to high memory. */
304#define NETIF_F_FRAGLIST 64 /* Scatter/gather IO. */
305#define NETIF_F_HW_VLAN_TX 128 /* Transmit VLAN hw acceleration */
306#define NETIF_F_HW_VLAN_RX 256 /* Receive VLAN hw acceleration */
307#define NETIF_F_HW_VLAN_FILTER 512 /* Receive filtering on VLAN */
308#define NETIF_F_VLAN_CHALLENGED 1024 /* Device cannot handle VLAN packets */
309#define NETIF_F_TSO 2048 /* Can offload TCP/IP segmentation */
310#define NETIF_F_LLTX 4096 /* LockLess TX */
311
295 struct net_device *next_sched; 312 struct net_device *next_sched;
296 313
297 /* Interface index. Unique device identifier */ 314 /* Interface index. Unique device identifier */
@@ -316,9 +333,6 @@ struct net_device
316 * will (read: may be cleaned up at will). 333 * will (read: may be cleaned up at will).
317 */ 334 */
318 335
319 /* These may be needed for future network-power-down code. */
320 unsigned long trans_start; /* Time (in jiffies) of last Tx */
321 unsigned long last_rx; /* Time of last Rx */
322 336
323 unsigned short flags; /* interface flags (a la BSD) */ 337 unsigned short flags; /* interface flags (a la BSD) */
324 unsigned short gflags; 338 unsigned short gflags;
@@ -328,15 +342,12 @@ struct net_device
328 unsigned mtu; /* interface MTU value */ 342 unsigned mtu; /* interface MTU value */
329 unsigned short type; /* interface hardware type */ 343 unsigned short type; /* interface hardware type */
330 unsigned short hard_header_len; /* hardware hdr length */ 344 unsigned short hard_header_len; /* hardware hdr length */
331 void *priv; /* pointer to private data */
332 345
333 struct net_device *master; /* Pointer to master device of a group, 346 struct net_device *master; /* Pointer to master device of a group,
334 * which this device is member of. 347 * which this device is member of.
335 */ 348 */
336 349
337 /* Interface address info. */ 350 /* Interface address info. */
338 unsigned char broadcast[MAX_ADDR_LEN]; /* hw bcast add */
339 unsigned char dev_addr[MAX_ADDR_LEN]; /* hw address */
340 unsigned char perm_addr[MAX_ADDR_LEN]; /* permanent hw address */ 351 unsigned char perm_addr[MAX_ADDR_LEN]; /* permanent hw address */
341 unsigned char addr_len; /* hardware address length */ 352 unsigned char addr_len; /* hardware address length */
342 unsigned short dev_id; /* for shared network cards */ 353 unsigned short dev_id; /* for shared network cards */
@@ -346,8 +357,6 @@ struct net_device
346 int promiscuity; 357 int promiscuity;
347 int allmulti; 358 int allmulti;
348 359
349 int watchdog_timeo;
350 struct timer_list watchdog_timer;
351 360
352 /* Protocol specific pointers */ 361 /* Protocol specific pointers */
353 362
@@ -358,32 +367,62 @@ struct net_device
358 void *ec_ptr; /* Econet specific data */ 367 void *ec_ptr; /* Econet specific data */
359 void *ax25_ptr; /* AX.25 specific data */ 368 void *ax25_ptr; /* AX.25 specific data */
360 369
361 struct list_head poll_list; /* Link to poll list */ 370/*
371 * Cache line mostly used on receive path (including eth_type_trans())
372 */
373 struct list_head poll_list ____cacheline_aligned_in_smp;
374 /* Link to poll list */
375
376 int (*poll) (struct net_device *dev, int *quota);
362 int quota; 377 int quota;
363 int weight; 378 int weight;
379 unsigned long last_rx; /* Time of last Rx */
380 /* Interface address info used in eth_type_trans() */
381 unsigned char dev_addr[MAX_ADDR_LEN]; /* hw address, (before bcast
382 because most packets are unicast) */
383
384 unsigned char broadcast[MAX_ADDR_LEN]; /* hw bcast add */
364 385
386/*
387 * Cache line mostly used on queue transmit path (qdisc)
388 */
389 /* device queue lock */
390 spinlock_t queue_lock ____cacheline_aligned_in_smp;
365 struct Qdisc *qdisc; 391 struct Qdisc *qdisc;
366 struct Qdisc *qdisc_sleeping; 392 struct Qdisc *qdisc_sleeping;
367 struct Qdisc *qdisc_ingress;
368 struct list_head qdisc_list; 393 struct list_head qdisc_list;
369 unsigned long tx_queue_len; /* Max frames per queue allowed */ 394 unsigned long tx_queue_len; /* Max frames per queue allowed */
370 395
371 /* ingress path synchronizer */ 396 /* ingress path synchronizer */
372 spinlock_t ingress_lock; 397 spinlock_t ingress_lock;
398 struct Qdisc *qdisc_ingress;
399
400/*
401 * One part is mostly used on xmit path (device)
402 */
373 /* hard_start_xmit synchronizer */ 403 /* hard_start_xmit synchronizer */
374 spinlock_t xmit_lock; 404 spinlock_t xmit_lock ____cacheline_aligned_in_smp;
375 /* cpu id of processor entered to hard_start_xmit or -1, 405 /* cpu id of processor entered to hard_start_xmit or -1,
376 if nobody entered there. 406 if nobody entered there.
377 */ 407 */
378 int xmit_lock_owner; 408 int xmit_lock_owner;
379 /* device queue lock */ 409 void *priv; /* pointer to private data */
380 spinlock_t queue_lock; 410 int (*hard_start_xmit) (struct sk_buff *skb,
411 struct net_device *dev);
412 /* These may be needed for future network-power-down code. */
413 unsigned long trans_start; /* Time (in jiffies) of last Tx */
414
415 int watchdog_timeo; /* used by dev_watchdog() */
416 struct timer_list watchdog_timer;
417
418/*
419 * refcnt is a very hot point, so align it on SMP
420 */
381 /* Number of references to this device */ 421 /* Number of references to this device */
382 atomic_t refcnt; 422 atomic_t refcnt ____cacheline_aligned_in_smp;
423
383 /* delayed register/unregister */ 424 /* delayed register/unregister */
384 struct list_head todo_list; 425 struct list_head todo_list;
385 /* device name hash chain */
386 struct hlist_node name_hlist;
387 /* device index hash chain */ 426 /* device index hash chain */
388 struct hlist_node index_hlist; 427 struct hlist_node index_hlist;
389 428
@@ -396,21 +435,6 @@ struct net_device
396 NETREG_RELEASED, /* called free_netdev */ 435 NETREG_RELEASED, /* called free_netdev */
397 } reg_state; 436 } reg_state;
398 437
399 /* Net device features */
400 unsigned long features;
401#define NETIF_F_SG 1 /* Scatter/gather IO. */
402#define NETIF_F_IP_CSUM 2 /* Can checksum only TCP/UDP over IPv4. */
403#define NETIF_F_NO_CSUM 4 /* Does not require checksum. F.e. loopack. */
404#define NETIF_F_HW_CSUM 8 /* Can checksum all the packets. */
405#define NETIF_F_HIGHDMA 32 /* Can DMA to high memory. */
406#define NETIF_F_FRAGLIST 64 /* Scatter/gather IO. */
407#define NETIF_F_HW_VLAN_TX 128 /* Transmit VLAN hw acceleration */
408#define NETIF_F_HW_VLAN_RX 256 /* Receive VLAN hw acceleration */
409#define NETIF_F_HW_VLAN_FILTER 512 /* Receive filtering on VLAN */
410#define NETIF_F_VLAN_CHALLENGED 1024 /* Device cannot handle VLAN packets */
411#define NETIF_F_TSO 2048 /* Can offload TCP/IP segmentation */
412#define NETIF_F_LLTX 4096 /* LockLess TX */
413
414 /* Called after device is detached from network. */ 438 /* Called after device is detached from network. */
415 void (*uninit)(struct net_device *dev); 439 void (*uninit)(struct net_device *dev);
416 /* Called after last user reference disappears. */ 440 /* Called after last user reference disappears. */
@@ -419,10 +443,7 @@ struct net_device
419 /* Pointers to interface service routines. */ 443 /* Pointers to interface service routines. */
420 int (*open)(struct net_device *dev); 444 int (*open)(struct net_device *dev);
421 int (*stop)(struct net_device *dev); 445 int (*stop)(struct net_device *dev);
422 int (*hard_start_xmit) (struct sk_buff *skb,
423 struct net_device *dev);
424#define HAVE_NETDEV_POLL 446#define HAVE_NETDEV_POLL
425 int (*poll) (struct net_device *dev, int *quota);
426 int (*hard_header) (struct sk_buff *skb, 447 int (*hard_header) (struct sk_buff *skb,
427 struct net_device *dev, 448 struct net_device *dev,
428 unsigned short type, 449 unsigned short type,
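The struct net_device reshuffle groups fields by the hot path that touches them (receive, qdisc, transmit) and puts the reference count on its own cache line, so refcount writes do not bounce the lines that carry read-mostly data. A toy sketch of the same ____cacheline_aligned_in_smp idea, outside net_device and with invented field names:

	#include <linux/cache.h>
	#include <asm/atomic.h>

	/* Keep a write-hot counter off the cache line holding read-mostly state,
	 * so CPUs bumping the counter do not invalidate the config line
	 * (false sharing). */
	struct toy_dev {
		/* read-mostly on the fast path */
		unsigned int	mtu;
		unsigned long	flags;

		/* write-hot: give it its own line on SMP builds */
		atomic_t	refcnt ____cacheline_aligned_in_smp;
	};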
diff --git a/include/linux/netfilter_ipv4/ip_conntrack.h b/include/linux/netfilter_ipv4/ip_conntrack.h
index bace72a76cc4..4ced38736813 100644
--- a/include/linux/netfilter_ipv4/ip_conntrack.h
+++ b/include/linux/netfilter_ipv4/ip_conntrack.h
@@ -332,11 +332,28 @@ extern void need_ip_conntrack(void);
332extern int invert_tuplepr(struct ip_conntrack_tuple *inverse, 332extern int invert_tuplepr(struct ip_conntrack_tuple *inverse,
333 const struct ip_conntrack_tuple *orig); 333 const struct ip_conntrack_tuple *orig);
334 334
335extern void __ip_ct_refresh_acct(struct ip_conntrack *ct,
336 enum ip_conntrack_info ctinfo,
337 const struct sk_buff *skb,
338 unsigned long extra_jiffies,
339 int do_acct);
340
341/* Refresh conntrack for this many jiffies and do accounting */
342static inline void ip_ct_refresh_acct(struct ip_conntrack *ct,
343 enum ip_conntrack_info ctinfo,
344 const struct sk_buff *skb,
345 unsigned long extra_jiffies)
346{
347 __ip_ct_refresh_acct(ct, ctinfo, skb, extra_jiffies, 1);
348}
349
335/* Refresh conntrack for this many jiffies */ 350/* Refresh conntrack for this many jiffies */
336extern void ip_ct_refresh_acct(struct ip_conntrack *ct, 351static inline void ip_ct_refresh(struct ip_conntrack *ct,
337 enum ip_conntrack_info ctinfo, 352 const struct sk_buff *skb,
338 const struct sk_buff *skb, 353 unsigned long extra_jiffies)
339 unsigned long extra_jiffies); 354{
355 __ip_ct_refresh_acct(ct, 0, skb, extra_jiffies, 0);
356}
340 357
341/* These are for NAT. Icky. */ 358/* These are for NAT. Icky. */
342/* Update TCP window tracking data when NAT mangles the packet */ 359/* Update TCP window tracking data when NAT mangles the packet */
diff --git a/include/linux/netfilter_ipv4/ip_conntrack_pptp.h b/include/linux/netfilter_ipv4/ip_conntrack_pptp.h
index 389e3851d52f..816144c75de0 100644
--- a/include/linux/netfilter_ipv4/ip_conntrack_pptp.h
+++ b/include/linux/netfilter_ipv4/ip_conntrack_pptp.h
@@ -60,8 +60,8 @@ struct ip_ct_pptp_expect {
60 60
61struct pptp_pkt_hdr { 61struct pptp_pkt_hdr {
62 __u16 packetLength; 62 __u16 packetLength;
63 __u16 packetType; 63 __be16 packetType;
64 __u32 magicCookie; 64 __be32 magicCookie;
65}; 65};
66 66
67/* PptpControlMessageType values */ 67/* PptpControlMessageType values */
@@ -93,7 +93,7 @@ struct pptp_pkt_hdr {
93#define PPTP_REMOVE_DEVICE_ERROR 6 93#define PPTP_REMOVE_DEVICE_ERROR 6
94 94
95struct PptpControlHeader { 95struct PptpControlHeader {
96 __u16 messageType; 96 __be16 messageType;
97 __u16 reserved; 97 __u16 reserved;
98}; 98};
99 99
@@ -106,13 +106,13 @@ struct PptpControlHeader {
106#define PPTP_BEARER_CAP_DIGITAL 0x2 106#define PPTP_BEARER_CAP_DIGITAL 0x2
107 107
108struct PptpStartSessionRequest { 108struct PptpStartSessionRequest {
109 __u16 protocolVersion; 109 __be16 protocolVersion;
110 __u8 reserved1; 110 __u8 reserved1;
111 __u8 reserved2; 111 __u8 reserved2;
112 __u32 framingCapability; 112 __be32 framingCapability;
113 __u32 bearerCapability; 113 __be32 bearerCapability;
114 __u16 maxChannels; 114 __be16 maxChannels;
115 __u16 firmwareRevision; 115 __be16 firmwareRevision;
116 __u8 hostName[64]; 116 __u8 hostName[64];
117 __u8 vendorString[64]; 117 __u8 vendorString[64];
118}; 118};
@@ -125,13 +125,13 @@ struct PptpStartSessionRequest {
125#define PPTP_START_UNKNOWN_PROTOCOL 5 125#define PPTP_START_UNKNOWN_PROTOCOL 5
126 126
127struct PptpStartSessionReply { 127struct PptpStartSessionReply {
128 __u16 protocolVersion; 128 __be16 protocolVersion;
129 __u8 resultCode; 129 __u8 resultCode;
130 __u8 generalErrorCode; 130 __u8 generalErrorCode;
131 __u32 framingCapability; 131 __be32 framingCapability;
132 __u32 bearerCapability; 132 __be32 bearerCapability;
133 __u16 maxChannels; 133 __be16 maxChannels;
134 __u16 firmwareRevision; 134 __be16 firmwareRevision;
135 __u8 hostName[64]; 135 __u8 hostName[64];
136 __u8 vendorString[64]; 136 __u8 vendorString[64];
137}; 137};
@@ -155,7 +155,7 @@ struct PptpStopSessionReply {
155}; 155};
156 156
157struct PptpEchoRequest { 157struct PptpEchoRequest {
158 __u32 identNumber; 158 __be32 identNumber;
159}; 159};
160 160
161/* PptpEchoReplyResultCode */ 161/* PptpEchoReplyResultCode */
@@ -163,7 +163,7 @@ struct PptpEchoRequest {
163#define PPTP_ECHO_GENERAL_ERROR 2 163#define PPTP_ECHO_GENERAL_ERROR 2
164 164
165struct PptpEchoReply { 165struct PptpEchoReply {
166 __u32 identNumber; 166 __be32 identNumber;
167 __u8 resultCode; 167 __u8 resultCode;
168 __u8 generalErrorCode; 168 __u8 generalErrorCode;
169 __u16 reserved; 169 __u16 reserved;
@@ -180,16 +180,16 @@ struct PptpEchoReply {
180#define PPTP_DONT_CARE_BEARER_TYPE 3 180#define PPTP_DONT_CARE_BEARER_TYPE 3
181 181
182struct PptpOutCallRequest { 182struct PptpOutCallRequest {
183 __u16 callID; 183 __be16 callID;
184 __u16 callSerialNumber; 184 __be16 callSerialNumber;
185 __u32 minBPS; 185 __be32 minBPS;
186 __u32 maxBPS; 186 __be32 maxBPS;
187 __u32 bearerType; 187 __be32 bearerType;
188 __u32 framingType; 188 __be32 framingType;
189 __u16 packetWindow; 189 __be16 packetWindow;
190 __u16 packetProcDelay; 190 __be16 packetProcDelay;
191 __u16 reserved1; 191 __u16 reserved1;
192 __u16 phoneNumberLength; 192 __be16 phoneNumberLength;
193 __u16 reserved2; 193 __u16 reserved2;
194 __u8 phoneNumber[64]; 194 __u8 phoneNumber[64];
195 __u8 subAddress[64]; 195 __u8 subAddress[64];
@@ -205,24 +205,24 @@ struct PptpOutCallRequest {
205#define PPTP_OUTCALL_DONT_ACCEPT 7 205#define PPTP_OUTCALL_DONT_ACCEPT 7
206 206
207struct PptpOutCallReply { 207struct PptpOutCallReply {
208 __u16 callID; 208 __be16 callID;
209 __u16 peersCallID; 209 __be16 peersCallID;
210 __u8 resultCode; 210 __u8 resultCode;
211 __u8 generalErrorCode; 211 __u8 generalErrorCode;
212 __u16 causeCode; 212 __be16 causeCode;
213 __u32 connectSpeed; 213 __be32 connectSpeed;
214 __u16 packetWindow; 214 __be16 packetWindow;
215 __u16 packetProcDelay; 215 __be16 packetProcDelay;
216 __u32 physChannelID; 216 __be32 physChannelID;
217}; 217};
218 218
219struct PptpInCallRequest { 219struct PptpInCallRequest {
220 __u16 callID; 220 __be16 callID;
221 __u16 callSerialNumber; 221 __be16 callSerialNumber;
222 __u32 callBearerType; 222 __be32 callBearerType;
223 __u32 physChannelID; 223 __be32 physChannelID;
224 __u16 dialedNumberLength; 224 __be16 dialedNumberLength;
225 __u16 dialingNumberLength; 225 __be16 dialingNumberLength;
226 __u8 dialedNumber[64]; 226 __u8 dialedNumber[64];
227 __u8 dialingNumber[64]; 227 __u8 dialingNumber[64];
228 __u8 subAddress[64]; 228 __u8 subAddress[64];
@@ -234,61 +234,54 @@ struct PptpInCallRequest {
234#define PPTP_INCALL_DONT_ACCEPT 3 234#define PPTP_INCALL_DONT_ACCEPT 3
235 235
236struct PptpInCallReply { 236struct PptpInCallReply {
237 __u16 callID; 237 __be16 callID;
238 __u16 peersCallID; 238 __be16 peersCallID;
239 __u8 resultCode; 239 __u8 resultCode;
240 __u8 generalErrorCode; 240 __u8 generalErrorCode;
241 __u16 packetWindow; 241 __be16 packetWindow;
242 __u16 packetProcDelay; 242 __be16 packetProcDelay;
243 __u16 reserved; 243 __u16 reserved;
244}; 244};
245 245
246struct PptpInCallConnected { 246struct PptpInCallConnected {
247 __u16 peersCallID; 247 __be16 peersCallID;
248 __u16 reserved; 248 __u16 reserved;
249 __u32 connectSpeed; 249 __be32 connectSpeed;
250 __u16 packetWindow; 250 __be16 packetWindow;
251 __u16 packetProcDelay; 251 __be16 packetProcDelay;
252 __u32 callFramingType; 252 __be32 callFramingType;
253}; 253};
254 254
255struct PptpClearCallRequest { 255struct PptpClearCallRequest {
256 __u16 callID; 256 __be16 callID;
257 __u16 reserved; 257 __u16 reserved;
258}; 258};
259 259
260struct PptpCallDisconnectNotify { 260struct PptpCallDisconnectNotify {
261 __u16 callID; 261 __be16 callID;
262 __u8 resultCode; 262 __u8 resultCode;
263 __u8 generalErrorCode; 263 __u8 generalErrorCode;
264 __u16 causeCode; 264 __be16 causeCode;
265 __u16 reserved; 265 __u16 reserved;
266 __u8 callStatistics[128]; 266 __u8 callStatistics[128];
267}; 267};
268 268
269struct PptpWanErrorNotify { 269struct PptpWanErrorNotify {
270 __u16 peersCallID; 270 __be16 peersCallID;
271 __u16 reserved; 271 __u16 reserved;
272 __u32 crcErrors; 272 __be32 crcErrors;
273 __u32 framingErrors; 273 __be32 framingErrors;
274 __u32 hardwareOverRuns; 274 __be32 hardwareOverRuns;
275 __u32 bufferOverRuns; 275 __be32 bufferOverRuns;
276 __u32 timeoutErrors; 276 __be32 timeoutErrors;
277 __u32 alignmentErrors; 277 __be32 alignmentErrors;
278}; 278};
279 279
280struct PptpSetLinkInfo { 280struct PptpSetLinkInfo {
281 __u16 peersCallID; 281 __be16 peersCallID;
282 __u16 reserved; 282 __u16 reserved;
283 __u32 sendAccm; 283 __be32 sendAccm;
284 __u32 recvAccm; 284 __be32 recvAccm;
285};
286
287
288struct pptp_priv_data {
289 __u16 call_id;
290 __u16 mcall_id;
291 __u16 pcall_id;
292}; 285};
293 286
294union pptp_ctrl_union { 287union pptp_ctrl_union {
diff --git a/include/linux/netfilter_ipv4/ip_conntrack_tuple.h b/include/linux/netfilter_ipv4/ip_conntrack_tuple.h
index 14dc0f7b6556..20e43f018b7c 100644
--- a/include/linux/netfilter_ipv4/ip_conntrack_tuple.h
+++ b/include/linux/netfilter_ipv4/ip_conntrack_tuple.h
@@ -17,7 +17,7 @@ union ip_conntrack_manip_proto
17 u_int16_t all; 17 u_int16_t all;
18 18
19 struct { 19 struct {
20 u_int16_t port; 20 __be16 port;
21 } tcp; 21 } tcp;
22 struct { 22 struct {
23 u_int16_t port; 23 u_int16_t port;
@@ -29,7 +29,7 @@ union ip_conntrack_manip_proto
29 u_int16_t port; 29 u_int16_t port;
30 } sctp; 30 } sctp;
31 struct { 31 struct {
32 u_int16_t key; /* key is 32bit, pptp only uses 16 */ 32 __be16 key; /* key is 32bit, pptp only uses 16 */
33 } gre; 33 } gre;
34}; 34};
35 35
@@ -65,7 +65,7 @@ struct ip_conntrack_tuple
65 u_int16_t port; 65 u_int16_t port;
66 } sctp; 66 } sctp;
67 struct { 67 struct {
68 u_int16_t key; /* key is 32bit, 68 __be16 key; /* key is 32bit,
69 * pptp only uses 16 */ 69 * pptp only uses 16 */
70 } gre; 70 } gre;
71 } u; 71 } u;
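
The __u16/__u32 to __be16/__be32 switch above is an annotation change: the on-the-wire PPTP fields are big endian, and marking them __be* lets sparse ("make C=1 CF=-D__CHECK_ENDIAN__") flag any place that mixes them with host-order values without a conversion. A minimal sketch of the intended usage, with a hypothetical header rather than the real PPTP structs:

        /* Illustrative only: annotated big-endian fields plus the helpers
         * that convert to and from host byte order. */
        struct example_wire_hdr {
                __be16  callID;
                __be32  connectSpeed;
        };

        static void example_fill(struct example_wire_hdr *hdr, u16 id, u32 speed)
        {
                hdr->callID       = htons(id);   /* host -> big endian */
                hdr->connectSpeed = htonl(speed);
        }

        static u16 example_read_id(const struct example_wire_hdr *hdr)
        {
                return ntohs(hdr->callID);       /* big endian -> host */
        }

Assigning hdr->callID = id directly would still compile, but sparse would warn about the dropped __be16 annotation, which is exactly the class of bug the conversion above is meant to surface.
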
diff --git a/include/linux/netfilter_ipv4/ip_nat_core.h b/include/linux/netfilter_ipv4/ip_nat_core.h
index 3b50eb91f007..30db23f06b03 100644
--- a/include/linux/netfilter_ipv4/ip_nat_core.h
+++ b/include/linux/netfilter_ipv4/ip_nat_core.h
@@ -5,16 +5,14 @@
5 5
6/* This header used to share core functionality between the standalone 6/* This header used to share core functionality between the standalone
7 NAT module, and the compatibility layer's use of NAT for masquerading. */ 7 NAT module, and the compatibility layer's use of NAT for masquerading. */
8extern int ip_nat_init(void);
9extern void ip_nat_cleanup(void);
10 8
11extern unsigned int nat_packet(struct ip_conntrack *ct, 9extern unsigned int ip_nat_packet(struct ip_conntrack *ct,
12 enum ip_conntrack_info conntrackinfo, 10 enum ip_conntrack_info conntrackinfo,
13 unsigned int hooknum, 11 unsigned int hooknum,
14 struct sk_buff **pskb); 12 struct sk_buff **pskb);
15 13
16extern int icmp_reply_translation(struct sk_buff **pskb, 14extern int ip_nat_icmp_reply_translation(struct sk_buff **pskb,
17 struct ip_conntrack *ct, 15 struct ip_conntrack *ct,
18 enum ip_nat_manip_type manip, 16 enum ip_nat_manip_type manip,
19 enum ip_conntrack_dir dir); 17 enum ip_conntrack_dir dir);
20#endif /* _IP_NAT_CORE_H */ 18#endif /* _IP_NAT_CORE_H */
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index c49d28eca561..f74ed9462475 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -185,6 +185,7 @@
185#define PCI_DEVICE_ID_LSI_61C102 0x0901 185#define PCI_DEVICE_ID_LSI_61C102 0x0901
186#define PCI_DEVICE_ID_LSI_63C815 0x1000 186#define PCI_DEVICE_ID_LSI_63C815 0x1000
187#define PCI_DEVICE_ID_LSI_SAS1064 0x0050 187#define PCI_DEVICE_ID_LSI_SAS1064 0x0050
188#define PCI_DEVICE_ID_LSI_SAS1064R 0x0411
188#define PCI_DEVICE_ID_LSI_SAS1066 0x005E 189#define PCI_DEVICE_ID_LSI_SAS1066 0x005E
189#define PCI_DEVICE_ID_LSI_SAS1068 0x0054 190#define PCI_DEVICE_ID_LSI_SAS1068 0x0054
190#define PCI_DEVICE_ID_LSI_SAS1064A 0x005C 191#define PCI_DEVICE_ID_LSI_SAS1064A 0x005C
@@ -392,6 +393,7 @@
392#define PCI_DEVICE_ID_NS_87560_USB 0x0012 393#define PCI_DEVICE_ID_NS_87560_USB 0x0012
393#define PCI_DEVICE_ID_NS_83815 0x0020 394#define PCI_DEVICE_ID_NS_83815 0x0020
394#define PCI_DEVICE_ID_NS_83820 0x0022 395#define PCI_DEVICE_ID_NS_83820 0x0022
396#define PCI_DEVICE_ID_NS_SATURN 0x0035
395#define PCI_DEVICE_ID_NS_SCx200_BRIDGE 0x0500 397#define PCI_DEVICE_ID_NS_SCx200_BRIDGE 0x0500
396#define PCI_DEVICE_ID_NS_SCx200_SMI 0x0501 398#define PCI_DEVICE_ID_NS_SCx200_SMI 0x0501
397#define PCI_DEVICE_ID_NS_SCx200_IDE 0x0502 399#define PCI_DEVICE_ID_NS_SCx200_IDE 0x0502
@@ -559,6 +561,7 @@
559#define PCI_VENDOR_ID_DELL 0x1028 561#define PCI_VENDOR_ID_DELL 0x1028
560#define PCI_DEVICE_ID_DELL_RACIII 0x0008 562#define PCI_DEVICE_ID_DELL_RACIII 0x0008
561#define PCI_DEVICE_ID_DELL_RAC4 0x0012 563#define PCI_DEVICE_ID_DELL_RAC4 0x0012
564#define PCI_DEVICE_ID_DELL_PERC5 0x0015
562 565
563#define PCI_VENDOR_ID_MATROX 0x102B 566#define PCI_VENDOR_ID_MATROX 0x102B
564#define PCI_DEVICE_ID_MATROX_MGA_2 0x0518 567#define PCI_DEVICE_ID_MATROX_MGA_2 0x0518
@@ -769,6 +772,8 @@
769#define PCI_DEVICE_ID_TI_TVP4010 0x3d04 772#define PCI_DEVICE_ID_TI_TVP4010 0x3d04
770#define PCI_DEVICE_ID_TI_TVP4020 0x3d07 773#define PCI_DEVICE_ID_TI_TVP4020 0x3d07
771#define PCI_DEVICE_ID_TI_4450 0x8011 774#define PCI_DEVICE_ID_TI_4450 0x8011
775#define PCI_DEVICE_ID_TI_XX21_XX11 0x8031
776#define PCI_DEVICE_ID_TI_X515 0x8036
772#define PCI_DEVICE_ID_TI_1130 0xac12 777#define PCI_DEVICE_ID_TI_1130 0xac12
773#define PCI_DEVICE_ID_TI_1031 0xac13 778#define PCI_DEVICE_ID_TI_1031 0xac13
774#define PCI_DEVICE_ID_TI_1131 0xac15 779#define PCI_DEVICE_ID_TI_1131 0xac15
@@ -785,12 +790,17 @@
785#define PCI_DEVICE_ID_TI_4451 0xac42 790#define PCI_DEVICE_ID_TI_4451 0xac42
786#define PCI_DEVICE_ID_TI_4510 0xac44 791#define PCI_DEVICE_ID_TI_4510 0xac44
787#define PCI_DEVICE_ID_TI_4520 0xac46 792#define PCI_DEVICE_ID_TI_4520 0xac46
793#define PCI_DEVICE_ID_TI_7510 0xac47
794#define PCI_DEVICE_ID_TI_7610 0xac48
795#define PCI_DEVICE_ID_TI_7410 0xac49
788#define PCI_DEVICE_ID_TI_1410 0xac50 796#define PCI_DEVICE_ID_TI_1410 0xac50
789#define PCI_DEVICE_ID_TI_1420 0xac51 797#define PCI_DEVICE_ID_TI_1420 0xac51
790#define PCI_DEVICE_ID_TI_1451A 0xac52 798#define PCI_DEVICE_ID_TI_1451A 0xac52
791#define PCI_DEVICE_ID_TI_1620 0xac54 799#define PCI_DEVICE_ID_TI_1620 0xac54
792#define PCI_DEVICE_ID_TI_1520 0xac55 800#define PCI_DEVICE_ID_TI_1520 0xac55
793#define PCI_DEVICE_ID_TI_1510 0xac56 801#define PCI_DEVICE_ID_TI_1510 0xac56
802#define PCI_DEVICE_ID_TI_X620 0xac8d
803#define PCI_DEVICE_ID_TI_X420 0xac8e
794 804
795#define PCI_VENDOR_ID_SONY 0x104d 805#define PCI_VENDOR_ID_SONY 0x104d
796#define PCI_DEVICE_ID_SONY_CXD3222 0x8039 806#define PCI_DEVICE_ID_SONY_CXD3222 0x8039
@@ -976,6 +986,7 @@
976#define PCI_DEVICE_ID_SUN_SABRE 0xa000 986#define PCI_DEVICE_ID_SUN_SABRE 0xa000
977#define PCI_DEVICE_ID_SUN_HUMMINGBIRD 0xa001 987#define PCI_DEVICE_ID_SUN_HUMMINGBIRD 0xa001
978#define PCI_DEVICE_ID_SUN_TOMATILLO 0xa801 988#define PCI_DEVICE_ID_SUN_TOMATILLO 0xa801
989#define PCI_DEVICE_ID_SUN_CASSINI 0xabba
979 990
980#define PCI_VENDOR_ID_CMD 0x1095 991#define PCI_VENDOR_ID_CMD 0x1095
981#define PCI_DEVICE_ID_CMD_640 0x0640 992#define PCI_DEVICE_ID_CMD_640 0x0640
@@ -1268,7 +1279,8 @@
1268#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA 0x0266 1279#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA 0x0266
1269#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2 0x0267 1280#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2 0x0267
1270#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_IDE 0x036E 1281#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_IDE 0x036E
1271#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA 0x036F 1282#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA 0x037E
1283#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2 0x037F
1272#define PCI_DEVICE_ID_NVIDIA_NVENET_12 0x0268 1284#define PCI_DEVICE_ID_NVIDIA_NVENET_12 0x0268
1273#define PCI_DEVICE_ID_NVIDIA_NVENET_13 0x0269 1285#define PCI_DEVICE_ID_NVIDIA_NVENET_13 0x0269
1274#define PCI_DEVICE_ID_NVIDIA_MCP51_AUDIO 0x026B 1286#define PCI_DEVICE_ID_NVIDIA_MCP51_AUDIO 0x026B
@@ -2186,7 +2198,12 @@
2186#define PCI_DEVICE_ID_ENE_1211 0x1211 2198#define PCI_DEVICE_ID_ENE_1211 0x1211
2187#define PCI_DEVICE_ID_ENE_1225 0x1225 2199#define PCI_DEVICE_ID_ENE_1225 0x1225
2188#define PCI_DEVICE_ID_ENE_1410 0x1410 2200#define PCI_DEVICE_ID_ENE_1410 0x1410
2201#define PCI_DEVICE_ID_ENE_710 0x1411
2202#define PCI_DEVICE_ID_ENE_712 0x1412
2189#define PCI_DEVICE_ID_ENE_1420 0x1420 2203#define PCI_DEVICE_ID_ENE_1420 0x1420
2204#define PCI_DEVICE_ID_ENE_720 0x1421
2205#define PCI_DEVICE_ID_ENE_722 0x1422
2206
2190#define PCI_VENDOR_ID_CHELSIO 0x1425 2207#define PCI_VENDOR_ID_CHELSIO 0x1425
2191 2208
2192#define PCI_VENDOR_ID_MIPS 0x153f 2209#define PCI_VENDOR_ID_MIPS 0x153f
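
The new constants above only name vendor/device numbers; drivers consume them through their pci_device_id tables. A hedged sketch of that usage, assuming the 2.6-era PCI probe API (PCI_DEVICE(), MODULE_DEVICE_TABLE()); the driver table below is made up:

        #include <linux/module.h>
        #include <linux/pci.h>

        /* Hypothetical driver claiming one of the IDs added above. */
        static struct pci_device_id example_pci_tbl[] = {
                { PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SATURN) },
                { }     /* terminating all-zero entry */
        };
        MODULE_DEVICE_TABLE(pci, example_pci_tbl);

Keeping the numeric IDs in pci_ids.h rather than scattered through drivers is what makes additions like the ones above one-line changes.
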
diff --git a/include/linux/reboot.h b/include/linux/reboot.h
index 3b3266ff1a95..7ab2cdb83ef0 100644
--- a/include/linux/reboot.h
+++ b/include/linux/reboot.h
@@ -59,6 +59,10 @@ extern void machine_crash_shutdown(struct pt_regs *);
59 * Architecture independent implemenations of sys_reboot commands. 59 * Architecture independent implemenations of sys_reboot commands.
60 */ 60 */
61 61
62extern void kernel_restart_prepare(char *cmd);
63extern void kernel_halt_prepare(void);
64extern void kernel_power_off_prepare(void);
65
62extern void kernel_restart(char *cmd); 66extern void kernel_restart(char *cmd);
63extern void kernel_halt(void); 67extern void kernel_halt(void);
64extern void kernel_power_off(void); 68extern void kernel_power_off(void);
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 49e617fa0f66..c3ba31f210a9 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -107,13 +107,25 @@ extern unsigned long nr_iowait(void);
107 107
108#include <asm/processor.h> 108#include <asm/processor.h>
109 109
110/*
111 * Task state bitmask. NOTE! These bits are also
112 * encoded in fs/proc/array.c: get_task_state().
113 *
114 * We have two separate sets of flags: task->state
115 * is about runnability, while task->exit_state are
116 * about the task exiting. Confusing, but this way
117 * modifying one set can't modify the other one by
118 * mistake.
119 */
110#define TASK_RUNNING 0 120#define TASK_RUNNING 0
111#define TASK_INTERRUPTIBLE 1 121#define TASK_INTERRUPTIBLE 1
112#define TASK_UNINTERRUPTIBLE 2 122#define TASK_UNINTERRUPTIBLE 2
113#define TASK_STOPPED 4 123#define TASK_STOPPED 4
114#define TASK_TRACED 8 124#define TASK_TRACED 8
125/* in tsk->exit_state */
115#define EXIT_ZOMBIE 16 126#define EXIT_ZOMBIE 16
116#define EXIT_DEAD 32 127#define EXIT_DEAD 32
128/* in tsk->state again */
117#define TASK_NONINTERACTIVE 64 129#define TASK_NONINTERACTIVE 64
118 130
119#define __set_task_state(tsk, state_value) \ 131#define __set_task_state(tsk, state_value) \
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 2741c0c55e83..466c879f82b8 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -155,8 +155,6 @@ struct skb_shared_info {
155#define SKB_DATAREF_SHIFT 16 155#define SKB_DATAREF_SHIFT 16
156#define SKB_DATAREF_MASK ((1 << SKB_DATAREF_SHIFT) - 1) 156#define SKB_DATAREF_MASK ((1 << SKB_DATAREF_SHIFT) - 1)
157 157
158extern struct timeval skb_tv_base;
159
160struct skb_timeval { 158struct skb_timeval {
161 u32 off_sec; 159 u32 off_sec;
162 u32 off_usec; 160 u32 off_usec;
@@ -175,7 +173,7 @@ enum {
175 * @prev: Previous buffer in list 173 * @prev: Previous buffer in list
176 * @list: List we are on 174 * @list: List we are on
177 * @sk: Socket we are owned by 175 * @sk: Socket we are owned by
178 * @tstamp: Time we arrived stored as offset to skb_tv_base 176 * @tstamp: Time we arrived
179 * @dev: Device we arrived on/are leaving by 177 * @dev: Device we arrived on/are leaving by
180 * @input_dev: Device we arrived on 178 * @input_dev: Device we arrived on
181 * @h: Transport layer header 179 * @h: Transport layer header
@@ -1255,10 +1253,6 @@ static inline void skb_get_timestamp(const struct sk_buff *skb, struct timeval *
1255{ 1253{
1256 stamp->tv_sec = skb->tstamp.off_sec; 1254 stamp->tv_sec = skb->tstamp.off_sec;
1257 stamp->tv_usec = skb->tstamp.off_usec; 1255 stamp->tv_usec = skb->tstamp.off_usec;
1258 if (skb->tstamp.off_sec) {
1259 stamp->tv_sec += skb_tv_base.tv_sec;
1260 stamp->tv_usec += skb_tv_base.tv_usec;
1261 }
1262} 1256}
1263 1257
1264/** 1258/**
@@ -1272,8 +1266,8 @@ static inline void skb_get_timestamp(const struct sk_buff *skb, struct timeval *
1272 */ 1266 */
1273static inline void skb_set_timestamp(struct sk_buff *skb, const struct timeval *stamp) 1267static inline void skb_set_timestamp(struct sk_buff *skb, const struct timeval *stamp)
1274{ 1268{
1275 skb->tstamp.off_sec = stamp->tv_sec - skb_tv_base.tv_sec; 1269 skb->tstamp.off_sec = stamp->tv_sec;
1276 skb->tstamp.off_usec = stamp->tv_usec - skb_tv_base.tv_usec; 1270 skb->tstamp.off_usec = stamp->tv_usec;
1277} 1271}
1278 1272
1279extern void __net_timestamp(struct sk_buff *skb); 1273extern void __net_timestamp(struct sk_buff *skb);
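
With skb_tv_base removed, skb->tstamp holds an absolute time again and the accessors degenerate to plain copies. Callers are unchanged; a small sketch of the usual pattern (the function is hypothetical, the helper is the one shown above):

        static void example_log_rx_time(const struct sk_buff *skb)
        {
                struct timeval tv;

                skb_get_timestamp(skb, &tv);    /* copy tstamp into a timeval */
                printk(KERN_DEBUG "rx at %ld.%06ld\n", tv.tv_sec, tv.tv_usec);
        }

The simplification matters mostly on the set side: skb_set_timestamp() no longer has to subtract a global base, so there is no shared variable to export or keep consistent.
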
diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
index 3a29a9f9b451..fc8e367f671e 100644
--- a/include/linux/sysctl.h
+++ b/include/linux/sysctl.h
@@ -202,7 +202,8 @@ enum
202 NET_TR=14, 202 NET_TR=14,
203 NET_DECNET=15, 203 NET_DECNET=15,
204 NET_ECONET=16, 204 NET_ECONET=16,
205 NET_SCTP=17, 205 NET_SCTP=17,
206 NET_LLC=18,
206}; 207};
207 208
208/* /proc/sys/kernel/random */ 209/* /proc/sys/kernel/random */
@@ -522,6 +523,29 @@ enum {
522 NET_IPX_FORWARDING=2 523 NET_IPX_FORWARDING=2
523}; 524};
524 525
526/* /proc/sys/net/llc */
527enum {
528 NET_LLC2=1,
529 NET_LLC_STATION=2,
530};
531
532/* /proc/sys/net/llc/llc2 */
533enum {
534 NET_LLC2_TIMEOUT=1,
535};
536
537/* /proc/sys/net/llc/station */
538enum {
539 NET_LLC_STATION_ACK_TIMEOUT=1,
540};
541
542/* /proc/sys/net/llc/llc2/timeout */
543enum {
544 NET_LLC2_ACK_TIMEOUT=1,
545 NET_LLC2_P_TIMEOUT=2,
546 NET_LLC2_REJ_TIMEOUT=3,
547 NET_LLC2_BUSY_TIMEOUT=4,
548};
525 549
526/* /proc/sys/net/appletalk */ 550/* /proc/sys/net/appletalk */
527enum { 551enum {
diff --git a/include/linux/tc_ematch/tc_em_meta.h b/include/linux/tc_ematch/tc_em_meta.h
index 081b1ee8516e..e21937cf91d0 100644
--- a/include/linux/tc_ematch/tc_em_meta.h
+++ b/include/linux/tc_ematch/tc_em_meta.h
@@ -71,7 +71,7 @@ enum
71 TCF_META_ID_SK_SNDBUF, 71 TCF_META_ID_SK_SNDBUF,
72 TCF_META_ID_SK_ALLOCS, 72 TCF_META_ID_SK_ALLOCS,
73 TCF_META_ID_SK_ROUTE_CAPS, 73 TCF_META_ID_SK_ROUTE_CAPS,
74 TCF_META_ID_SK_HASHENT, 74 TCF_META_ID_SK_HASH,
75 TCF_META_ID_SK_LINGERTIME, 75 TCF_META_ID_SK_LINGERTIME,
76 TCF_META_ID_SK_ACK_BACKLOG, 76 TCF_META_ID_SK_ACK_BACKLOG,
77 TCF_META_ID_SK_MAX_ACK_BACKLOG, 77 TCF_META_ID_SK_MAX_ACK_BACKLOG,
diff --git a/include/net/inet6_hashtables.h b/include/net/inet6_hashtables.h
index 03df3b157960..5a2beed5a770 100644
--- a/include/net/inet6_hashtables.h
+++ b/include/net/inet6_hashtables.h
@@ -26,19 +26,18 @@
26struct inet_hashinfo; 26struct inet_hashinfo;
27 27
28/* I have no idea if this is a good hash for v6 or not. -DaveM */ 28/* I have no idea if this is a good hash for v6 or not. -DaveM */
29static inline int inet6_ehashfn(const struct in6_addr *laddr, const u16 lport, 29static inline unsigned int inet6_ehashfn(const struct in6_addr *laddr, const u16 lport,
30 const struct in6_addr *faddr, const u16 fport, 30 const struct in6_addr *faddr, const u16 fport)
31 const int ehash_size)
32{ 31{
33 int hashent = (lport ^ fport); 32 unsigned int hashent = (lport ^ fport);
34 33
35 hashent ^= (laddr->s6_addr32[3] ^ faddr->s6_addr32[3]); 34 hashent ^= (laddr->s6_addr32[3] ^ faddr->s6_addr32[3]);
36 hashent ^= hashent >> 16; 35 hashent ^= hashent >> 16;
37 hashent ^= hashent >> 8; 36 hashent ^= hashent >> 8;
38 return (hashent & (ehash_size - 1)); 37 return hashent;
39} 38}
40 39
41static inline int inet6_sk_ehashfn(const struct sock *sk, const int ehash_size) 40static inline int inet6_sk_ehashfn(const struct sock *sk)
42{ 41{
43 const struct inet_sock *inet = inet_sk(sk); 42 const struct inet_sock *inet = inet_sk(sk);
44 const struct ipv6_pinfo *np = inet6_sk(sk); 43 const struct ipv6_pinfo *np = inet6_sk(sk);
@@ -46,7 +45,7 @@ static inline int inet6_sk_ehashfn(const struct sock *sk, const int ehash_size)
46 const struct in6_addr *faddr = &np->daddr; 45 const struct in6_addr *faddr = &np->daddr;
47 const __u16 lport = inet->num; 46 const __u16 lport = inet->num;
48 const __u16 fport = inet->dport; 47 const __u16 fport = inet->dport;
49 return inet6_ehashfn(laddr, lport, faddr, fport, ehash_size); 48 return inet6_ehashfn(laddr, lport, faddr, fport);
50} 49}
51 50
52/* 51/*
@@ -69,14 +68,14 @@ static inline struct sock *
69 /* Optimize here for direct hit, only listening connections can 68 /* Optimize here for direct hit, only listening connections can
70 * have wildcards anyways. 69 * have wildcards anyways.
71 */ 70 */
72 const int hash = inet6_ehashfn(daddr, hnum, saddr, sport, 71 unsigned int hash = inet6_ehashfn(daddr, hnum, saddr, sport);
73 hashinfo->ehash_size); 72 struct inet_ehash_bucket *head = inet_ehash_bucket(hashinfo, hash);
74 struct inet_ehash_bucket *head = &hashinfo->ehash[hash];
75 73
74 prefetch(head->chain.first);
76 read_lock(&head->lock); 75 read_lock(&head->lock);
77 sk_for_each(sk, node, &head->chain) { 76 sk_for_each(sk, node, &head->chain) {
78 /* For IPV6 do the cheaper port and family tests first. */ 77 /* For IPV6 do the cheaper port and family tests first. */
79 if (INET6_MATCH(sk, saddr, daddr, ports, dif)) 78 if (INET6_MATCH(sk, hash, saddr, daddr, ports, dif))
80 goto hit; /* You sunk my battleship! */ 79 goto hit; /* You sunk my battleship! */
81 } 80 }
82 /* Must check for a TIME_WAIT'er before going to listener hash. */ 81 /* Must check for a TIME_WAIT'er before going to listener hash. */
diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h
index 646b6ea7fe26..35f49e65e295 100644
--- a/include/net/inet_hashtables.h
+++ b/include/net/inet_hashtables.h
@@ -108,7 +108,7 @@ struct inet_hashinfo {
108 struct inet_bind_hashbucket *bhash; 108 struct inet_bind_hashbucket *bhash;
109 109
110 int bhash_size; 110 int bhash_size;
111 int ehash_size; 111 unsigned int ehash_size;
112 112
113 /* All sockets in TCP_LISTEN state will be in here. This is the only 113 /* All sockets in TCP_LISTEN state will be in here. This is the only
114 * table where wildcard'd TCP sockets can exist. Hash function here 114 * table where wildcard'd TCP sockets can exist. Hash function here
@@ -130,17 +130,16 @@ struct inet_hashinfo {
130 int port_rover; 130 int port_rover;
131}; 131};
132 132
133static inline int inet_ehashfn(const __u32 laddr, const __u16 lport, 133static inline unsigned int inet_ehashfn(const __u32 laddr, const __u16 lport,
134 const __u32 faddr, const __u16 fport, 134 const __u32 faddr, const __u16 fport)
135 const int ehash_size)
136{ 135{
137 int h = (laddr ^ lport) ^ (faddr ^ fport); 136 unsigned int h = (laddr ^ lport) ^ (faddr ^ fport);
138 h ^= h >> 16; 137 h ^= h >> 16;
139 h ^= h >> 8; 138 h ^= h >> 8;
140 return h & (ehash_size - 1); 139 return h;
141} 140}
142 141
143static inline int inet_sk_ehashfn(const struct sock *sk, const int ehash_size) 142static inline int inet_sk_ehashfn(const struct sock *sk)
144{ 143{
145 const struct inet_sock *inet = inet_sk(sk); 144 const struct inet_sock *inet = inet_sk(sk);
146 const __u32 laddr = inet->rcv_saddr; 145 const __u32 laddr = inet->rcv_saddr;
@@ -148,7 +147,14 @@ static inline int inet_sk_ehashfn(const struct sock *sk, const int ehash_size)
148 const __u32 faddr = inet->daddr; 147 const __u32 faddr = inet->daddr;
149 const __u16 fport = inet->dport; 148 const __u16 fport = inet->dport;
150 149
151 return inet_ehashfn(laddr, lport, faddr, fport, ehash_size); 150 return inet_ehashfn(laddr, lport, faddr, fport);
151}
152
153static inline struct inet_ehash_bucket *inet_ehash_bucket(
154 struct inet_hashinfo *hashinfo,
155 unsigned int hash)
156{
157 return &hashinfo->ehash[hash & (hashinfo->ehash_size - 1)];
152} 158}
153 159
154extern struct inet_bind_bucket * 160extern struct inet_bind_bucket *
@@ -235,9 +241,11 @@ static inline void __inet_hash(struct inet_hashinfo *hashinfo,
235 lock = &hashinfo->lhash_lock; 241 lock = &hashinfo->lhash_lock;
236 inet_listen_wlock(hashinfo); 242 inet_listen_wlock(hashinfo);
237 } else { 243 } else {
238 sk->sk_hashent = inet_sk_ehashfn(sk, hashinfo->ehash_size); 244 struct inet_ehash_bucket *head;
239 list = &hashinfo->ehash[sk->sk_hashent].chain; 245 sk->sk_hash = inet_sk_ehashfn(sk);
240 lock = &hashinfo->ehash[sk->sk_hashent].lock; 246 head = inet_ehash_bucket(hashinfo, sk->sk_hash);
247 list = &head->chain;
248 lock = &head->lock;
241 write_lock(lock); 249 write_lock(lock);
242 } 250 }
243 __sk_add_node(sk, list); 251 __sk_add_node(sk, list);
@@ -268,9 +276,8 @@ static inline void inet_unhash(struct inet_hashinfo *hashinfo, struct sock *sk)
268 inet_listen_wlock(hashinfo); 276 inet_listen_wlock(hashinfo);
269 lock = &hashinfo->lhash_lock; 277 lock = &hashinfo->lhash_lock;
270 } else { 278 } else {
271 struct inet_ehash_bucket *head = &hashinfo->ehash[sk->sk_hashent]; 279 lock = &inet_ehash_bucket(hashinfo, sk->sk_hash)->lock;
272 lock = &head->lock; 280 write_lock_bh(lock);
273 write_lock_bh(&head->lock);
274 } 281 }
275 282
276 if (__sk_del_node_init(sk)) 283 if (__sk_del_node_init(sk))
@@ -337,23 +344,27 @@ sherry_cache:
337#define INET_ADDR_COOKIE(__name, __saddr, __daddr) \ 344#define INET_ADDR_COOKIE(__name, __saddr, __daddr) \
338 const __u64 __name = (((__u64)(__daddr)) << 32) | ((__u64)(__saddr)); 345 const __u64 __name = (((__u64)(__daddr)) << 32) | ((__u64)(__saddr));
339#endif /* __BIG_ENDIAN */ 346#endif /* __BIG_ENDIAN */
340#define INET_MATCH(__sk, __cookie, __saddr, __daddr, __ports, __dif)\ 347#define INET_MATCH(__sk, __hash, __cookie, __saddr, __daddr, __ports, __dif)\
341 (((*((__u64 *)&(inet_sk(__sk)->daddr))) == (__cookie)) && \ 348 (((__sk)->sk_hash == (__hash)) && \
349 ((*((__u64 *)&(inet_sk(__sk)->daddr))) == (__cookie)) && \
342 ((*((__u32 *)&(inet_sk(__sk)->dport))) == (__ports)) && \ 350 ((*((__u32 *)&(inet_sk(__sk)->dport))) == (__ports)) && \
343 (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif)))) 351 (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
344#define INET_TW_MATCH(__sk, __cookie, __saddr, __daddr, __ports, __dif)\ 352#define INET_TW_MATCH(__sk, __hash, __cookie, __saddr, __daddr, __ports, __dif)\
345 (((*((__u64 *)&(inet_twsk(__sk)->tw_daddr))) == (__cookie)) && \ 353 (((__sk)->sk_hash == (__hash)) && \
354 ((*((__u64 *)&(inet_twsk(__sk)->tw_daddr))) == (__cookie)) && \
346 ((*((__u32 *)&(inet_twsk(__sk)->tw_dport))) == (__ports)) && \ 355 ((*((__u32 *)&(inet_twsk(__sk)->tw_dport))) == (__ports)) && \
347 (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif)))) 356 (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
348#else /* 32-bit arch */ 357#else /* 32-bit arch */
349#define INET_ADDR_COOKIE(__name, __saddr, __daddr) 358#define INET_ADDR_COOKIE(__name, __saddr, __daddr)
350#define INET_MATCH(__sk, __cookie, __saddr, __daddr, __ports, __dif) \ 359#define INET_MATCH(__sk, __hash, __cookie, __saddr, __daddr, __ports, __dif) \
351 ((inet_sk(__sk)->daddr == (__saddr)) && \ 360 (((__sk)->sk_hash == (__hash)) && \
361 (inet_sk(__sk)->daddr == (__saddr)) && \
352 (inet_sk(__sk)->rcv_saddr == (__daddr)) && \ 362 (inet_sk(__sk)->rcv_saddr == (__daddr)) && \
353 ((*((__u32 *)&(inet_sk(__sk)->dport))) == (__ports)) && \ 363 ((*((__u32 *)&(inet_sk(__sk)->dport))) == (__ports)) && \
354 (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif)))) 364 (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
355#define INET_TW_MATCH(__sk, __cookie, __saddr, __daddr, __ports, __dif) \ 365#define INET_TW_MATCH(__sk, __hash,__cookie, __saddr, __daddr, __ports, __dif) \
356 ((inet_twsk(__sk)->tw_daddr == (__saddr)) && \ 366 (((__sk)->sk_hash == (__hash)) && \
367 (inet_twsk(__sk)->tw_daddr == (__saddr)) && \
357 (inet_twsk(__sk)->tw_rcv_saddr == (__daddr)) && \ 368 (inet_twsk(__sk)->tw_rcv_saddr == (__daddr)) && \
358 ((*((__u32 *)&(inet_twsk(__sk)->tw_dport))) == (__ports)) && \ 369 ((*((__u32 *)&(inet_twsk(__sk)->tw_dport))) == (__ports)) && \
359 (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif)))) 370 (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
@@ -378,18 +389,19 @@ static inline struct sock *
378 /* Optimize here for direct hit, only listening connections can 389 /* Optimize here for direct hit, only listening connections can
379 * have wildcards anyways. 390 * have wildcards anyways.
380 */ 391 */
381 const int hash = inet_ehashfn(daddr, hnum, saddr, sport, hashinfo->ehash_size); 392 unsigned int hash = inet_ehashfn(daddr, hnum, saddr, sport);
382 struct inet_ehash_bucket *head = &hashinfo->ehash[hash]; 393 struct inet_ehash_bucket *head = inet_ehash_bucket(hashinfo, hash);
383 394
395 prefetch(head->chain.first);
384 read_lock(&head->lock); 396 read_lock(&head->lock);
385 sk_for_each(sk, node, &head->chain) { 397 sk_for_each(sk, node, &head->chain) {
386 if (INET_MATCH(sk, acookie, saddr, daddr, ports, dif)) 398 if (INET_MATCH(sk, hash, acookie, saddr, daddr, ports, dif))
387 goto hit; /* You sunk my battleship! */ 399 goto hit; /* You sunk my battleship! */
388 } 400 }
389 401
390 /* Must check for a TIME_WAIT'er before going to listener hash. */ 402 /* Must check for a TIME_WAIT'er before going to listener hash. */
391 sk_for_each(sk, node, &(head + hashinfo->ehash_size)->chain) { 403 sk_for_each(sk, node, &(head + hashinfo->ehash_size)->chain) {
392 if (INET_TW_MATCH(sk, acookie, saddr, daddr, ports, dif)) 404 if (INET_TW_MATCH(sk, hash, acookie, saddr, daddr, ports, dif))
393 goto hit; 405 goto hit;
394 } 406 }
395 sk = NULL; 407 sk = NULL;
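
The shape of the change above: inet_ehashfn() now returns the full 32-bit hash, the value is cached in sk->sk_hash (via the new skc_hash field), and the table-size mask is applied in exactly one place, inet_ehash_bucket(). That also lets INET_MATCH() reject most non-matching sockets by comparing hashes before touching addresses and ports. A standalone sketch of the same pattern in plain C, with hypothetical names:

        struct item;

        struct bucket {
                struct item     *chain;
        };

        struct table {
                struct bucket   *buckets;
                unsigned int    size;           /* power of two */
        };

        static unsigned int full_hash(unsigned int a, unsigned int b)
        {
                unsigned int h = a ^ b;

                h ^= h >> 16;
                h ^= h >> 8;
                return h;                       /* note: no masking here */
        }

        static struct bucket *lookup_bucket(struct table *t, unsigned int hash)
        {
                return &t->buckets[hash & (t->size - 1)];  /* mask only at lookup */
        }

Storing the unmasked hash means the cached value stays valid regardless of table size, and a cheap 32-bit compare is enough to skip items whose full hash differs.
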
diff --git a/include/net/inet_timewait_sock.h b/include/net/inet_timewait_sock.h
index 3b070352e869..4ade56ef3a4d 100644
--- a/include/net/inet_timewait_sock.h
+++ b/include/net/inet_timewait_sock.h
@@ -112,6 +112,7 @@ struct inet_timewait_sock {
112#define tw_node __tw_common.skc_node 112#define tw_node __tw_common.skc_node
113#define tw_bind_node __tw_common.skc_bind_node 113#define tw_bind_node __tw_common.skc_bind_node
114#define tw_refcnt __tw_common.skc_refcnt 114#define tw_refcnt __tw_common.skc_refcnt
115#define tw_hash __tw_common.skc_hash
115#define tw_prot __tw_common.skc_prot 116#define tw_prot __tw_common.skc_prot
116 volatile unsigned char tw_substate; 117 volatile unsigned char tw_substate;
117 /* 3 bits hole, try to pack */ 118 /* 3 bits hole, try to pack */
@@ -126,7 +127,6 @@ struct inet_timewait_sock {
126 /* And these are ours. */ 127 /* And these are ours. */
127 __u8 tw_ipv6only:1; 128 __u8 tw_ipv6only:1;
128 /* 31 bits hole, try to pack */ 129 /* 31 bits hole, try to pack */
129 int tw_hashent;
130 int tw_timeout; 130 int tw_timeout;
131 unsigned long tw_ttd; 131 unsigned long tw_ttd;
132 struct inet_bind_bucket *tw_tb; 132 struct inet_bind_bucket *tw_tb;
diff --git a/include/net/llc.h b/include/net/llc.h
index 71769a5aeef3..1adb2ef3f6f7 100644
--- a/include/net/llc.h
+++ b/include/net/llc.h
@@ -17,6 +17,8 @@
17#include <linux/list.h> 17#include <linux/list.h>
18#include <linux/spinlock.h> 18#include <linux/spinlock.h>
19 19
20#include <asm/atomic.h>
21
20struct net_device; 22struct net_device;
21struct packet_type; 23struct packet_type;
22struct sk_buff; 24struct sk_buff;
@@ -44,6 +46,7 @@ struct llc_sap {
44 unsigned char state; 46 unsigned char state;
45 unsigned char p_bit; 47 unsigned char p_bit;
46 unsigned char f_bit; 48 unsigned char f_bit;
49 atomic_t refcnt;
47 int (*rcv_func)(struct sk_buff *skb, 50 int (*rcv_func)(struct sk_buff *skb,
48 struct net_device *dev, 51 struct net_device *dev,
49 struct packet_type *pt, 52 struct packet_type *pt,
@@ -81,13 +84,27 @@ extern struct llc_sap *llc_sap_open(unsigned char lsap,
81 struct net_device *dev, 84 struct net_device *dev,
82 struct packet_type *pt, 85 struct packet_type *pt,
83 struct net_device *orig_dev)); 86 struct net_device *orig_dev));
87static inline void llc_sap_hold(struct llc_sap *sap)
88{
89 atomic_inc(&sap->refcnt);
90}
91
84extern void llc_sap_close(struct llc_sap *sap); 92extern void llc_sap_close(struct llc_sap *sap);
85 93
94static inline void llc_sap_put(struct llc_sap *sap)
95{
96 if (atomic_dec_and_test(&sap->refcnt))
97 llc_sap_close(sap);
98}
99
86extern struct llc_sap *llc_sap_find(unsigned char sap_value); 100extern struct llc_sap *llc_sap_find(unsigned char sap_value);
87 101
88extern int llc_build_and_send_ui_pkt(struct llc_sap *sap, struct sk_buff *skb, 102extern int llc_build_and_send_ui_pkt(struct llc_sap *sap, struct sk_buff *skb,
89 unsigned char *dmac, unsigned char dsap); 103 unsigned char *dmac, unsigned char dsap);
90 104
105extern void llc_sap_handler(struct llc_sap *sap, struct sk_buff *skb);
106extern void llc_conn_handler(struct llc_sap *sap, struct sk_buff *skb);
107
91extern int llc_station_init(void); 108extern int llc_station_init(void);
92extern void llc_station_exit(void); 109extern void llc_station_exit(void);
93 110
@@ -98,4 +115,17 @@ extern void llc_proc_exit(void);
98#define llc_proc_init() (0) 115#define llc_proc_init() (0)
99#define llc_proc_exit() do { } while(0) 116#define llc_proc_exit() do { } while(0)
100#endif /* CONFIG_PROC_FS */ 117#endif /* CONFIG_PROC_FS */
118#ifdef CONFIG_SYSCTL
119extern int llc_sysctl_init(void);
120extern void llc_sysctl_exit(void);
121
122extern int sysctl_llc2_ack_timeout;
123extern int sysctl_llc2_busy_timeout;
124extern int sysctl_llc2_p_timeout;
125extern int sysctl_llc2_rej_timeout;
126extern int sysctl_llc_station_ack_timeout;
127#else
128#define llc_sysctl_init() (0)
129#define llc_sysctl_exit() do { } while(0)
130#endif /* CONFIG_SYSCTL */
101#endif /* LLC_H */ 131#endif /* LLC_H */
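
llc_sap_hold()/llc_sap_put() above turn SAP lifetime into plain reference counting: the last put calls llc_sap_close(). A minimal usage sketch, assuming the caller pairs lookup with hold (the function below is hypothetical):

        static int example_with_sap(unsigned char lsap, struct sk_buff *skb)
        {
                struct llc_sap *sap;

                sap = llc_sap_find(lsap);
                if (!sap)
                        return -ENOENT;

                llc_sap_hold(sap);              /* keep the SAP alive while in use */
                llc_sap_handler(sap, skb);      /* declared above in this header */
                llc_sap_put(sap);               /* last put closes the SAP */

                return 0;
        }

Whether llc_sap_find() should take the reference itself is a separate question; the sketch only shows the hold/put bracketing the new refcnt enables.
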
diff --git a/include/net/llc_conn.h b/include/net/llc_conn.h
index 8ad3bc2c23d7..54852ff6033b 100644
--- a/include/net/llc_conn.h
+++ b/include/net/llc_conn.h
@@ -19,14 +19,14 @@
19#define LLC_EVENT 1 19#define LLC_EVENT 1
20#define LLC_PACKET 2 20#define LLC_PACKET 2
21 21
22#define LLC_P_TIME 2 22#define LLC2_P_TIME 2
23#define LLC_ACK_TIME 1 23#define LLC2_ACK_TIME 1
24#define LLC_REJ_TIME 3 24#define LLC2_REJ_TIME 3
25#define LLC_BUSY_TIME 3 25#define LLC2_BUSY_TIME 3
26 26
27struct llc_timer { 27struct llc_timer {
28 struct timer_list timer; 28 struct timer_list timer;
29 u16 expire; /* timer expire time */ 29 unsigned long expire; /* timer expire time */
30}; 30};
31 31
32struct llc_sock { 32struct llc_sock {
@@ -38,6 +38,7 @@ struct llc_sock {
38 struct llc_addr laddr; /* lsap/mac pair */ 38 struct llc_addr laddr; /* lsap/mac pair */
39 struct llc_addr daddr; /* dsap/mac pair */ 39 struct llc_addr daddr; /* dsap/mac pair */
40 struct net_device *dev; /* device to send to remote */ 40 struct net_device *dev; /* device to send to remote */
41 u32 copied_seq; /* head of yet unread data */
41 u8 retry_count; /* number of retries */ 42 u8 retry_count; /* number of retries */
42 u8 ack_must_be_send; 43 u8 ack_must_be_send;
43 u8 first_pdu_Ns; 44 u8 first_pdu_Ns;
@@ -92,7 +93,8 @@ static __inline__ char llc_backlog_type(struct sk_buff *skb)
92 return skb->cb[sizeof(skb->cb) - 1]; 93 return skb->cb[sizeof(skb->cb) - 1];
93} 94}
94 95
95extern struct sock *llc_sk_alloc(int family, int priority, struct proto *prot); 96extern struct sock *llc_sk_alloc(int family, unsigned int __nocast priority,
97 struct proto *prot);
96extern void llc_sk_free(struct sock *sk); 98extern void llc_sk_free(struct sock *sk);
97 99
98extern void llc_sk_reset(struct sock *sk); 100extern void llc_sk_reset(struct sock *sk);
@@ -115,5 +117,4 @@ extern void llc_sap_remove_socket(struct llc_sap *sap, struct sock *sk);
115 117
116extern u8 llc_data_accept_state(u8 state); 118extern u8 llc_data_accept_state(u8 state);
117extern void llc_build_offset_table(void); 119extern void llc_build_offset_table(void);
118extern int llc_release_sockets(struct llc_sap *sap);
119#endif /* LLC_CONN_H */ 120#endif /* LLC_CONN_H */
diff --git a/include/net/llc_sap.h b/include/net/llc_sap.h
index 353baaa627f3..2c56dbece729 100644
--- a/include/net/llc_sap.h
+++ b/include/net/llc_sap.h
@@ -12,11 +12,15 @@
12 * See the GNU General Public License for more details. 12 * See the GNU General Public License for more details.
13 */ 13 */
14struct llc_sap; 14struct llc_sap;
15struct net_device;
15struct sk_buff; 16struct sk_buff;
17struct sock;
16 18
17extern void llc_sap_rtn_pdu(struct llc_sap *sap, struct sk_buff *skb); 19extern void llc_sap_rtn_pdu(struct llc_sap *sap, struct sk_buff *skb);
18extern void llc_save_primitive(struct sk_buff* skb, unsigned char prim); 20extern void llc_save_primitive(struct sock *sk, struct sk_buff* skb,
19extern struct sk_buff *llc_alloc_frame(void); 21 unsigned char prim);
22extern struct sk_buff *llc_alloc_frame(struct sock *sk,
23 struct net_device *dev);
20 24
21extern void llc_build_and_send_test_pkt(struct llc_sap *sap, 25extern void llc_build_and_send_test_pkt(struct llc_sap *sap,
22 struct sk_buff *skb, 26 struct sk_buff *skb,
diff --git a/include/net/sock.h b/include/net/sock.h
index 8c48fbecb7cf..b6440805c420 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -99,6 +99,7 @@ struct proto;
99 * @skc_node: main hash linkage for various protocol lookup tables 99 * @skc_node: main hash linkage for various protocol lookup tables
100 * @skc_bind_node: bind hash linkage for various protocol lookup tables 100 * @skc_bind_node: bind hash linkage for various protocol lookup tables
101 * @skc_refcnt: reference count 101 * @skc_refcnt: reference count
102 * @skc_hash: hash value used with various protocol lookup tables
102 * @skc_prot: protocol handlers inside a network family 103 * @skc_prot: protocol handlers inside a network family
103 * 104 *
104 * This is the minimal network layer representation of sockets, the header 105 * This is the minimal network layer representation of sockets, the header
@@ -112,6 +113,7 @@ struct sock_common {
112 struct hlist_node skc_node; 113 struct hlist_node skc_node;
113 struct hlist_node skc_bind_node; 114 struct hlist_node skc_bind_node;
114 atomic_t skc_refcnt; 115 atomic_t skc_refcnt;
116 unsigned int skc_hash;
115 struct proto *skc_prot; 117 struct proto *skc_prot;
116}; 118};
117 119
@@ -139,7 +141,6 @@ struct sock_common {
139 * @sk_no_check: %SO_NO_CHECK setting, wether or not checkup packets 141 * @sk_no_check: %SO_NO_CHECK setting, wether or not checkup packets
140 * @sk_route_caps: route capabilities (e.g. %NETIF_F_TSO) 142 * @sk_route_caps: route capabilities (e.g. %NETIF_F_TSO)
141 * @sk_lingertime: %SO_LINGER l_linger setting 143 * @sk_lingertime: %SO_LINGER l_linger setting
142 * @sk_hashent: hash entry in several tables (e.g. inet_hashinfo.ehash)
143 * @sk_backlog: always used with the per-socket spinlock held 144 * @sk_backlog: always used with the per-socket spinlock held
144 * @sk_callback_lock: used with the callbacks in the end of this struct 145 * @sk_callback_lock: used with the callbacks in the end of this struct
145 * @sk_error_queue: rarely used 146 * @sk_error_queue: rarely used
@@ -186,6 +187,7 @@ struct sock {
186#define sk_node __sk_common.skc_node 187#define sk_node __sk_common.skc_node
187#define sk_bind_node __sk_common.skc_bind_node 188#define sk_bind_node __sk_common.skc_bind_node
188#define sk_refcnt __sk_common.skc_refcnt 189#define sk_refcnt __sk_common.skc_refcnt
190#define sk_hash __sk_common.skc_hash
189#define sk_prot __sk_common.skc_prot 191#define sk_prot __sk_common.skc_prot
190 unsigned char sk_shutdown : 2, 192 unsigned char sk_shutdown : 2,
191 sk_no_check : 2, 193 sk_no_check : 2,
@@ -208,7 +210,6 @@ struct sock {
208 unsigned int sk_allocation; 210 unsigned int sk_allocation;
209 int sk_sndbuf; 211 int sk_sndbuf;
210 int sk_route_caps; 212 int sk_route_caps;
211 int sk_hashent;
212 unsigned long sk_flags; 213 unsigned long sk_flags;
213 unsigned long sk_lingertime; 214 unsigned long sk_lingertime;
214 /* 215 /*
diff --git a/include/pcmcia/ss.h b/include/pcmcia/ss.h
index 0f7aacc33fe9..c8592c7e8eaa 100644
--- a/include/pcmcia/ss.h
+++ b/include/pcmcia/ss.h
@@ -21,6 +21,9 @@
21#include <pcmcia/cs_types.h> 21#include <pcmcia/cs_types.h>
22#include <pcmcia/cs.h> 22#include <pcmcia/cs.h>
23#include <pcmcia/bulkmem.h> 23#include <pcmcia/bulkmem.h>
24#ifdef CONFIG_CARDBUS
25#include <linux/pci.h>
26#endif
24 27
25/* Definitions for card status flags for GetStatus */ 28/* Definitions for card status flags for GetStatus */
26#define SS_WRPROT 0x0001 29#define SS_WRPROT 0x0001
@@ -233,7 +236,11 @@ struct pcmcia_socket {
233 236
234 /* so is power hook */ 237 /* so is power hook */
235 int (*power_hook)(struct pcmcia_socket *sock, int operation); 238 int (*power_hook)(struct pcmcia_socket *sock, int operation);
236 239#ifdef CONFIG_CARDBUS
240 /* allows tuning the CB bridge before loading driver for the CB card */
241 void (*tune_bridge)(struct pcmcia_socket *sock, struct pci_bus *bus);
242#endif
243
237 /* state thread */ 244 /* state thread */
238 struct semaphore skt_sem; /* protects socket h/w state */ 245 struct semaphore skt_sem; /* protects socket h/w state */
239 246
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index e16cf94870f2..e6f4c9e55df7 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -665,7 +665,6 @@ struct ib_ucontext {
665 struct list_head qp_list; 665 struct list_head qp_list;
666 struct list_head srq_list; 666 struct list_head srq_list;
667 struct list_head ah_list; 667 struct list_head ah_list;
668 spinlock_t lock;
669}; 668};
670 669
671struct ib_uobject { 670struct ib_uobject {
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index c0e4c67d836f..7ece05666feb 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -163,6 +163,7 @@ struct scsi_target {
163 unsigned int id; /* target id ... replace 163 unsigned int id; /* target id ... replace
164 * scsi_device.id eventually */ 164 * scsi_device.id eventually */
165 unsigned long create:1; /* signal that it needs to be added */ 165 unsigned long create:1; /* signal that it needs to be added */
166 char scsi_level;
166 void *hostdata; /* available to low-level driver */ 167 void *hostdata; /* available to low-level driver */
167 unsigned long starget_data[0]; /* for the transport */ 168 unsigned long starget_data[0]; /* for the transport */
168 /* starget_data must be the last element!!!! */ 169 /* starget_data must be the last element!!!! */
diff --git a/ipc/mqueue.c b/ipc/mqueue.c
index 3a926011507b..a0f18c9cc89d 100644
--- a/ipc/mqueue.c
+++ b/ipc/mqueue.c
@@ -611,6 +611,7 @@ static struct file *do_create(struct dentry *dir, struct dentry *dentry,
611 dentry->d_fsdata = &attr; 611 dentry->d_fsdata = &attr;
612 } 612 }
613 613
614 mode &= ~current->fs->umask;
614 ret = vfs_create(dir->d_inode, dentry, mode, NULL); 615 ret = vfs_create(dir->d_inode, dentry, mode, NULL);
615 dentry->d_fsdata = NULL; 616 dentry->d_fsdata = NULL;
616 if (ret) 617 if (ret)
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 79866bc6b3a1..45a5719a0104 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -968,8 +968,6 @@ static ssize_t cpuset_common_file_read(struct file *file, char __user *buf,
968 char *page; 968 char *page;
969 ssize_t retval = 0; 969 ssize_t retval = 0;
970 char *s; 970 char *s;
971 char *start;
972 size_t n;
973 971
974 if (!(page = (char *)__get_free_page(GFP_KERNEL))) 972 if (!(page = (char *)__get_free_page(GFP_KERNEL)))
975 return -ENOMEM; 973 return -ENOMEM;
@@ -999,14 +997,7 @@ static ssize_t cpuset_common_file_read(struct file *file, char __user *buf,
999 *s++ = '\n'; 997 *s++ = '\n';
1000 *s = '\0'; 998 *s = '\0';
1001 999
1002 /* Do nothing if *ppos is at the eof or beyond the eof. */ 1000 retval = simple_read_from_buffer(buf, nbytes, ppos, page, s - page);
1003 if (s - page <= *ppos)
1004 return 0;
1005
1006 start = page + *ppos;
1007 n = s - start;
1008 retval = n - copy_to_user(buf, start, min(n, nbytes));
1009 *ppos += retval;
1010out: 1001out:
1011 free_page((unsigned long)page); 1002 free_page((unsigned long)page);
1012 return retval; 1003 return retval;
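
The cpuset hunk replaces hand-rolled *ppos/copy_to_user() bookkeeping with simple_read_from_buffer(), which does the EOF check, the partial copy, and the offset update in one call, and avoids the early "return 0" path that skipped freeing the page. A reduced sketch of the resulting read-handler shape; the buffer-filling step here is a placeholder:

        static ssize_t example_file_read(struct file *file, char __user *buf,
                                         size_t nbytes, loff_t *ppos)
        {
                char *page;
                ssize_t retval;
                size_t len;

                page = (char *)__get_free_page(GFP_KERNEL);
                if (!page)
                        return -ENOMEM;

                len = snprintf(page, PAGE_SIZE, "example output\n");
                retval = simple_read_from_buffer(buf, nbytes, ppos, page, len);

                free_page((unsigned long)page);
                return retval;
        }
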
diff --git a/kernel/exit.c b/kernel/exit.c
index ee6d8b8abef5..43077732619b 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -1203,7 +1203,7 @@ static int wait_task_stopped(task_t *p, int delayed_group_leader, int noreap,
1203 1203
1204 exit_code = p->exit_code; 1204 exit_code = p->exit_code;
1205 if (unlikely(!exit_code) || 1205 if (unlikely(!exit_code) ||
1206 unlikely(p->state > TASK_STOPPED)) 1206 unlikely(p->state & TASK_TRACED))
1207 goto bail_ref; 1207 goto bail_ref;
1208 return wait_noreap_copyout(p, pid, uid, 1208 return wait_noreap_copyout(p, pid, uid,
1209 why, (exit_code << 8) | 0x7f, 1209 why, (exit_code << 8) | 0x7f,
diff --git a/kernel/params.c b/kernel/params.c
index fbf173215fd2..1a8614bac5d5 100644
--- a/kernel/params.c
+++ b/kernel/params.c
@@ -80,8 +80,6 @@ static char *next_arg(char *args, char **param, char **val)
80 int in_quote = 0, quoted = 0; 80 int in_quote = 0, quoted = 0;
81 char *next; 81 char *next;
82 82
83 /* Chew any extra spaces */
84 while (*args == ' ') args++;
85 if (*args == '"') { 83 if (*args == '"') {
86 args++; 84 args++;
87 in_quote = 1; 85 in_quote = 1;
@@ -121,6 +119,10 @@ static char *next_arg(char *args, char **param, char **val)
121 next = args + i + 1; 119 next = args + i + 1;
122 } else 120 } else
123 next = args + i; 121 next = args + i;
122
123 /* Chew up trailing spaces. */
124 while (*next == ' ')
125 next++;
124 return next; 126 return next;
125} 127}
126 128
@@ -135,6 +137,10 @@ int parse_args(const char *name,
135 137
136 DEBUGP("Parsing ARGS: %s\n", args); 138 DEBUGP("Parsing ARGS: %s\n", args);
137 139
140 /* Chew leading spaces */
141 while (*args == ' ')
142 args++;
143
138 while (*args) { 144 while (*args) {
139 int ret; 145 int ret;
140 146
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index 396c7873e804..46a5e5acff97 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -29,7 +29,7 @@ config PM_DEBUG
29 29
30config SOFTWARE_SUSPEND 30config SOFTWARE_SUSPEND
31 bool "Software Suspend" 31 bool "Software Suspend"
32 depends on PM && SWAP && (X86 || ((FVR || PPC32) && !SMP)) 32 depends on PM && SWAP && (X86 && (!SMP || SUSPEND_SMP)) || ((FVR || PPC32) && !SMP)
33 ---help--- 33 ---help---
34 Enable the possibility of suspending the machine. 34 Enable the possibility of suspending the machine.
35 It doesn't need APM. 35 It doesn't need APM.
diff --git a/kernel/power/disk.c b/kernel/power/disk.c
index 2d8bf054d036..761956e813f5 100644
--- a/kernel/power/disk.c
+++ b/kernel/power/disk.c
@@ -17,12 +17,12 @@
17#include <linux/delay.h> 17#include <linux/delay.h>
18#include <linux/fs.h> 18#include <linux/fs.h>
19#include <linux/mount.h> 19#include <linux/mount.h>
20#include <linux/pm.h>
20 21
21#include "power.h" 22#include "power.h"
22 23
23 24
24extern suspend_disk_method_t pm_disk_mode; 25extern suspend_disk_method_t pm_disk_mode;
25extern struct pm_ops * pm_ops;
26 26
27extern int swsusp_suspend(void); 27extern int swsusp_suspend(void);
28extern int swsusp_write(void); 28extern int swsusp_write(void);
@@ -49,13 +49,11 @@ dev_t swsusp_resume_device;
49 49
50static void power_down(suspend_disk_method_t mode) 50static void power_down(suspend_disk_method_t mode)
51{ 51{
52 unsigned long flags;
53 int error = 0; 52 int error = 0;
54 53
55 local_irq_save(flags);
56 switch(mode) { 54 switch(mode) {
57 case PM_DISK_PLATFORM: 55 case PM_DISK_PLATFORM:
58 device_shutdown(); 56 kernel_power_off_prepare();
59 error = pm_ops->enter(PM_SUSPEND_DISK); 57 error = pm_ops->enter(PM_SUSPEND_DISK);
60 break; 58 break;
61 case PM_DISK_SHUTDOWN: 59 case PM_DISK_SHUTDOWN:
diff --git a/kernel/power/power.h b/kernel/power/power.h
index cd6a3493cc0d..6748de23e83c 100644
--- a/kernel/power/power.h
+++ b/kernel/power/power.h
@@ -1,7 +1,7 @@
1#include <linux/suspend.h> 1#include <linux/suspend.h>
2#include <linux/utsname.h> 2#include <linux/utsname.h>
3 3
4/* With SUSPEND_CONSOLE defined, it suspend looks *really* cool, but 4/* With SUSPEND_CONSOLE defined suspend looks *really* cool, but
5 we probably do not take enough locks for switching consoles, etc, 5 we probably do not take enough locks for switching consoles, etc,
6 so bad things might happen. 6 so bad things might happen.
7*/ 7*/
@@ -9,6 +9,9 @@
9#define SUSPEND_CONSOLE (MAX_NR_CONSOLES-1) 9#define SUSPEND_CONSOLE (MAX_NR_CONSOLES-1)
10#endif 10#endif
11 11
12#define MAX_PBES ((PAGE_SIZE - sizeof(struct new_utsname) \
13 - 4 - 3*sizeof(unsigned long) - sizeof(int) \
14 - sizeof(void *)) / sizeof(swp_entry_t))
12 15
13struct swsusp_info { 16struct swsusp_info {
14 struct new_utsname uts; 17 struct new_utsname uts;
@@ -18,7 +21,7 @@ struct swsusp_info {
18 unsigned long image_pages; 21 unsigned long image_pages;
19 unsigned long pagedir_pages; 22 unsigned long pagedir_pages;
20 suspend_pagedir_t * suspend_pagedir; 23 suspend_pagedir_t * suspend_pagedir;
21 swp_entry_t pagedir[768]; 24 swp_entry_t pagedir[MAX_PBES];
22} __attribute__((aligned(PAGE_SIZE))); 25} __attribute__((aligned(PAGE_SIZE)));
23 26
24 27
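
The removed hard-coded pagedir[768] becomes MAX_PBES, sized so that struct swsusp_info still fits in one page. As a rough worked example (assuming 4 KiB pages, a 390-byte struct new_utsname, and 32-bit longs and pointers, so sizeof(swp_entry_t) == 4): 4096 - 390 - 4 - 3*4 - 4 - 4 = 3682 bytes remain for the array, or about 920 entries; the same arithmetic with 64-bit longs and pointers gives roughly 458. The swsusp.c hunk further below then checks that the number of pagedir pages needed for nr_copy_pages still fits within MAX_PBES entries instead of silently overrunning the old fixed-size array.
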
diff --git a/kernel/power/swsusp.c b/kernel/power/swsusp.c
index d967e875ee82..acf79ac1cb6d 100644
--- a/kernel/power/swsusp.c
+++ b/kernel/power/swsusp.c
@@ -363,7 +363,7 @@ static void lock_swapdevices(void)
363} 363}
364 364
365/** 365/**
366 * write_swap_page - Write one page to a fresh swap location. 366 * write_page - Write one page to a fresh swap location.
367 * @addr: Address we're writing. 367 * @addr: Address we're writing.
368 * @loc: Place to store the entry we used. 368 * @loc: Place to store the entry we used.
369 * 369 *
@@ -402,15 +402,14 @@ static int write_page(unsigned long addr, swp_entry_t * loc)
402static void data_free(void) 402static void data_free(void)
403{ 403{
404 swp_entry_t entry; 404 swp_entry_t entry;
405 int i; 405 struct pbe * p;
406 406
407 for (i = 0; i < nr_copy_pages; i++) { 407 for_each_pbe(p, pagedir_nosave) {
408 entry = (pagedir_nosave + i)->swap_address; 408 entry = p->swap_address;
409 if (entry.val) 409 if (entry.val)
410 swap_free(entry); 410 swap_free(entry);
411 else 411 else
412 break; 412 break;
413 (pagedir_nosave + i)->swap_address = (swp_entry_t){0};
414 } 413 }
415} 414}
416 415
@@ -863,6 +862,9 @@ static int alloc_image_pages(void)
863 return 0; 862 return 0;
864} 863}
865 864
865/* Free pages we allocated for suspend. Suspend pages are alocated
866 * before atomic copy, so we need to free them after resume.
867 */
866void swsusp_free(void) 868void swsusp_free(void)
867{ 869{
868 BUG_ON(PageNosave(virt_to_page(pagedir_save))); 870 BUG_ON(PageNosave(virt_to_page(pagedir_save)));
@@ -918,6 +920,7 @@ static int swsusp_alloc(void)
918 920
919 pagedir_nosave = NULL; 921 pagedir_nosave = NULL;
920 nr_copy_pages = calc_nr(nr_copy_pages); 922 nr_copy_pages = calc_nr(nr_copy_pages);
923 nr_copy_pages_check = nr_copy_pages;
921 924
922 pr_debug("suspend: (pages needed: %d + %d free: %d)\n", 925 pr_debug("suspend: (pages needed: %d + %d free: %d)\n",
923 nr_copy_pages, PAGES_FOR_IO, nr_free_pages()); 926 nr_copy_pages, PAGES_FOR_IO, nr_free_pages());
@@ -928,6 +931,10 @@ static int swsusp_alloc(void)
928 if (!enough_swap()) 931 if (!enough_swap())
929 return -ENOSPC; 932 return -ENOSPC;
930 933
934 if (MAX_PBES < nr_copy_pages / PBES_PER_PAGE +
935 !!(nr_copy_pages % PBES_PER_PAGE))
936 return -ENOSPC;
937
931 if (!(pagedir_save = alloc_pagedir(nr_copy_pages))) { 938 if (!(pagedir_save = alloc_pagedir(nr_copy_pages))) {
932 printk(KERN_ERR "suspend: Allocating pagedir failed.\n"); 939 printk(KERN_ERR "suspend: Allocating pagedir failed.\n");
933 return -ENOMEM; 940 return -ENOMEM;
@@ -940,7 +947,6 @@ static int swsusp_alloc(void)
940 return error; 947 return error;
941 } 948 }
942 949
943 nr_copy_pages_check = nr_copy_pages;
944 return 0; 950 return 0;
945} 951}
946 952
@@ -1213,8 +1219,9 @@ static struct pbe * swsusp_pagedir_relocate(struct pbe *pblist)
1213 free_pagedir(pblist); 1219 free_pagedir(pblist);
1214 free_eaten_memory(); 1220 free_eaten_memory();
1215 pblist = NULL; 1221 pblist = NULL;
1216 } 1222 /* Is this even worth handling? It should never ever happen, and we
1217 else 1223 have just lost user's state, anyway... */
1224 } else
1218 printk("swsusp: Relocated %d pages\n", rel); 1225 printk("swsusp: Relocated %d pages\n", rel);
1219 1226
1220 return pblist; 1227 return pblist;
@@ -1434,9 +1441,9 @@ static int read_pagedir(struct pbe *pblist)
1434 } 1441 }
1435 1442
1436 if (error) 1443 if (error)
1437 free_page((unsigned long)pblist); 1444 free_pagedir(pblist);
1438 1445 else
1439 BUG_ON(i != swsusp_info.pagedir_pages); 1446 BUG_ON(i != swsusp_info.pagedir_pages);
1440 1447
1441 return error; 1448 return error;
1442} 1449}
diff --git a/kernel/signal.c b/kernel/signal.c
index b92c3c9f8b9a..619b027e92b5 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -936,34 +936,31 @@ force_sig_specific(int sig, struct task_struct *t)
936 * as soon as they're available, so putting the signal on the shared queue 936 * as soon as they're available, so putting the signal on the shared queue
937 * will be equivalent to sending it to one such thread. 937 * will be equivalent to sending it to one such thread.
938 */ 938 */
939#define wants_signal(sig, p, mask) \ 939static inline int wants_signal(int sig, struct task_struct *p)
940 (!sigismember(&(p)->blocked, sig) \ 940{
941 && !((p)->state & mask) \ 941 if (sigismember(&p->blocked, sig))
942 && !((p)->flags & PF_EXITING) \ 942 return 0;
943 && (task_curr(p) || !signal_pending(p))) 943 if (p->flags & PF_EXITING)
944 944 return 0;
945 if (sig == SIGKILL)
946 return 1;
947 if (p->state & (TASK_STOPPED | TASK_TRACED))
948 return 0;
949 return task_curr(p) || !signal_pending(p);
950}
945 951
946static void 952static void
947__group_complete_signal(int sig, struct task_struct *p) 953__group_complete_signal(int sig, struct task_struct *p)
948{ 954{
949 unsigned int mask;
950 struct task_struct *t; 955 struct task_struct *t;
951 956
952 /* 957 /*
953 * Don't bother traced and stopped tasks (but
954 * SIGKILL will punch through that).
955 */
956 mask = TASK_STOPPED | TASK_TRACED;
957 if (sig == SIGKILL)
958 mask = 0;
959
960 /*
961 * Now find a thread we can wake up to take the signal off the queue. 958 * Now find a thread we can wake up to take the signal off the queue.
962 * 959 *
963 * If the main thread wants the signal, it gets first crack. 960 * If the main thread wants the signal, it gets first crack.
964 * Probably the least surprising to the average bear. 961 * Probably the least surprising to the average bear.
965 */ 962 */
966 if (wants_signal(sig, p, mask)) 963 if (wants_signal(sig, p))
967 t = p; 964 t = p;
968 else if (thread_group_empty(p)) 965 else if (thread_group_empty(p))
969 /* 966 /*
@@ -981,7 +978,7 @@ __group_complete_signal(int sig, struct task_struct *p)
981 t = p->signal->curr_target = p; 978 t = p->signal->curr_target = p;
982 BUG_ON(t->tgid != p->tgid); 979 BUG_ON(t->tgid != p->tgid);
983 980
984 while (!wants_signal(sig, t, mask)) { 981 while (!wants_signal(sig, t)) {
985 t = next_thread(t); 982 t = next_thread(t);
986 if (t == p->signal->curr_target) 983 if (t == p->signal->curr_target)
987 /* 984 /*
@@ -1766,7 +1763,8 @@ do_signal_stop(int signr)
1766 * stop is always done with the siglock held, 1763 * stop is always done with the siglock held,
1767 * so this check has no races. 1764 * so this check has no races.
1768 */ 1765 */
1769 if (t->state < TASK_STOPPED) { 1766 if (!t->exit_state &&
1767 !(t->state & (TASK_STOPPED|TASK_TRACED))) {
1770 stop_count++; 1768 stop_count++;
1771 signal_wake_up(t, 0); 1769 signal_wake_up(t, 0);
1772 } 1770 }
diff --git a/kernel/sys.c b/kernel/sys.c
index f723522e6986..2fa1ed18123c 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -361,17 +361,35 @@ out_unlock:
361 return retval; 361 return retval;
362} 362}
363 363
364/**
365 * emergency_restart - reboot the system
366 *
367 * Without shutting down any hardware or taking any locks
368 * reboot the system. This is called when we know we are in
369 * trouble so this is our best effort to reboot. This is
370 * safe to call in interrupt context.
371 */
364void emergency_restart(void) 372void emergency_restart(void)
365{ 373{
366 machine_emergency_restart(); 374 machine_emergency_restart();
367} 375}
368EXPORT_SYMBOL_GPL(emergency_restart); 376EXPORT_SYMBOL_GPL(emergency_restart);
369 377
370void kernel_restart(char *cmd) 378/**
379 * kernel_restart - reboot the system
380 *
381 * Shutdown everything and perform a clean reboot.
382 * This is not safe to call in interrupt context.
383 */
384void kernel_restart_prepare(char *cmd)
371{ 385{
372 notifier_call_chain(&reboot_notifier_list, SYS_RESTART, cmd); 386 notifier_call_chain(&reboot_notifier_list, SYS_RESTART, cmd);
373 system_state = SYSTEM_RESTART; 387 system_state = SYSTEM_RESTART;
374 device_shutdown(); 388 device_shutdown();
389}
390void kernel_restart(char *cmd)
391{
392 kernel_restart_prepare(cmd);
375 if (!cmd) { 393 if (!cmd) {
376 printk(KERN_EMERG "Restarting system.\n"); 394 printk(KERN_EMERG "Restarting system.\n");
377 } else { 395 } else {
@@ -382,6 +400,12 @@ void kernel_restart(char *cmd)
382} 400}
383EXPORT_SYMBOL_GPL(kernel_restart); 401EXPORT_SYMBOL_GPL(kernel_restart);
384 402
403/**
404 * kernel_kexec - reboot the system
405 *
406 * Move into place and start executing a preloaded standalone
407 * executable. If nothing was preloaded return an error.
408 */
385void kernel_kexec(void) 409void kernel_kexec(void)
386{ 410{
387#ifdef CONFIG_KEXEC 411#ifdef CONFIG_KEXEC
@@ -390,9 +414,7 @@ void kernel_kexec(void)
390 if (!image) { 414 if (!image) {
391 return; 415 return;
392 } 416 }
393 notifier_call_chain(&reboot_notifier_list, SYS_RESTART, NULL); 417 kernel_restart_prepare(NULL);
394 system_state = SYSTEM_RESTART;
395 device_shutdown();
396 printk(KERN_EMERG "Starting new kernel\n"); 418 printk(KERN_EMERG "Starting new kernel\n");
397 machine_shutdown(); 419 machine_shutdown();
398 machine_kexec(image); 420 machine_kexec(image);
@@ -400,21 +422,39 @@ void kernel_kexec(void)
400} 422}
401EXPORT_SYMBOL_GPL(kernel_kexec); 423EXPORT_SYMBOL_GPL(kernel_kexec);
402 424
403void kernel_halt(void) 425/**
426 * kernel_halt - halt the system
427 *
428 * Shutdown everything and perform a clean system halt.
429 */
430void kernel_halt_prepare(void)
404{ 431{
405 notifier_call_chain(&reboot_notifier_list, SYS_HALT, NULL); 432 notifier_call_chain(&reboot_notifier_list, SYS_HALT, NULL);
406 system_state = SYSTEM_HALT; 433 system_state = SYSTEM_HALT;
407 device_shutdown(); 434 device_shutdown();
435}
436void kernel_halt(void)
437{
438 kernel_halt_prepare();
408 printk(KERN_EMERG "System halted.\n"); 439 printk(KERN_EMERG "System halted.\n");
409 machine_halt(); 440 machine_halt();
410} 441}
411EXPORT_SYMBOL_GPL(kernel_halt); 442EXPORT_SYMBOL_GPL(kernel_halt);
412 443
413void kernel_power_off(void) 444/**
445 * kernel_power_off - power_off the system
446 *
447 * Shutdown everything and perform a clean system power_off.
448 */
449void kernel_power_off_prepare(void)
414{ 450{
415 notifier_call_chain(&reboot_notifier_list, SYS_POWER_OFF, NULL); 451 notifier_call_chain(&reboot_notifier_list, SYS_POWER_OFF, NULL);
416 system_state = SYSTEM_POWER_OFF; 452 system_state = SYSTEM_POWER_OFF;
417 device_shutdown(); 453 device_shutdown();
454}
455void kernel_power_off(void)
456{
457 kernel_power_off_prepare();
418 printk(KERN_EMERG "Power down.\n"); 458 printk(KERN_EMERG "Power down.\n");
419 machine_power_off(); 459 machine_power_off();
420} 460}
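
The kernel/sys.c hunks above pull the steps every shutdown path shares (running the reboot notifier chain, setting system_state, and calling device_shutdown()) into kernel_restart_prepare(), kernel_halt_prepare() and kernel_power_off_prepare(), so that kernel_kexec() can reuse the restart preparation instead of open-coding it. Reassembled from the hunks, the restart/kexec shape reads roughly as the sketch below; the machine_restart() call is an assumption about what follows the printk outside this hunk.

void kernel_restart_prepare(char *cmd)
{
        notifier_call_chain(&reboot_notifier_list, SYS_RESTART, cmd);
        system_state = SYSTEM_RESTART;
        device_shutdown();              /* quiesce devices before the jump */
}

void kernel_restart(char *cmd)
{
        kernel_restart_prepare(cmd);
        /* ... log whether a reboot command was given ... */
        machine_restart(cmd);           /* assumption: arch-specific restart hook */
}

void kernel_kexec(void)
{
        /* ... look up the preloaded kexec image ... */
        kernel_restart_prepare(NULL);   /* same preparation, no duplication */
        machine_shutdown();
        machine_kexec(image);
}

The halt and power-off paths get the same split, which also makes the prepare step callable on its own where the final machine_halt()/machine_power_off() is not wanted.
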
diff --git a/mm/bootmem.c b/mm/bootmem.c
index 8ec4e4c2a179..c1330cc19783 100644
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -61,17 +61,9 @@ static unsigned long __init init_bootmem_core (pg_data_t *pgdat,
61{ 61{
62 bootmem_data_t *bdata = pgdat->bdata; 62 bootmem_data_t *bdata = pgdat->bdata;
63 unsigned long mapsize = ((end - start)+7)/8; 63 unsigned long mapsize = ((end - start)+7)/8;
64 static struct pglist_data *pgdat_last; 64
65 65 pgdat->pgdat_next = pgdat_list;
66 pgdat->pgdat_next = NULL; 66 pgdat_list = pgdat;
67 /* Add new nodes last so that bootmem always starts
68 searching in the first nodes, not the last ones */
69 if (pgdat_last)
70 pgdat_last->pgdat_next = pgdat;
71 else {
72 pgdat_list = pgdat;
73 pgdat_last = pgdat;
74 }
75 67
76 mapsize = ALIGN(mapsize, sizeof(long)); 68 mapsize = ALIGN(mapsize, sizeof(long));
77 bdata->node_bootmem_map = phys_to_virt(mapstart << PAGE_SHIFT); 69 bdata->node_bootmem_map = phys_to_virt(mapstart << PAGE_SHIFT);
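
In the mm/bootmem.c hunk, registering a node no longer appends it to the tail of the singly linked pgdat list through a static pgdat_last pointer; the node is simply pushed onto the head of pgdat_list. The visible behavioural difference is that the list now holds nodes in reverse registration order, so the removed comment about searching the first nodes first no longer applies. The head-insertion pattern itself, as a standalone sketch with a stand-in node type:

#include <stdio.h>
#include <stdlib.h>

struct node {                        /* stand-in for pg_data_t */
        int id;
        struct node *next;
};

static struct node *list_head;       /* stand-in for pgdat_list */

/* O(1) push onto the head; no static "last" pointer carrying state
 * between calls, which is what the hunk above removes. */
static void insert_head(struct node *n)
{
        n->next = list_head;
        list_head = n;
}

int main(void)
{
        for (int i = 0; i < 3; i++) {
                struct node *n = malloc(sizeof(*n));

                if (!n)
                        return 1;
                n->id = i;
                insert_head(n);
        }
        for (struct node *n = list_head; n; n = n->next)
                printf("%d ", n->id);    /* prints 2 1 0: reverse order */
        printf("\n");
        return 0;
}
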
diff --git a/mm/mremap.c b/mm/mremap.c
index a32fed454bd7..f343fc73a8bd 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -141,10 +141,10 @@ move_one_page(struct vm_area_struct *vma, unsigned long old_addr,
141 if (dst) { 141 if (dst) {
142 pte_t pte; 142 pte_t pte;
143 pte = ptep_clear_flush(vma, old_addr, src); 143 pte = ptep_clear_flush(vma, old_addr, src);
144
144 /* ZERO_PAGE can be dependant on virtual addr */ 145 /* ZERO_PAGE can be dependant on virtual addr */
145 if (pfn_valid(pte_pfn(pte)) && 146 pte = move_pte(pte, new_vma->vm_page_prot,
146 pte_page(pte) == ZERO_PAGE(old_addr)) 147 old_addr, new_addr);
147 pte = pte_wrprotect(mk_pte(ZERO_PAGE(new_addr), new_vma->vm_page_prot));
148 set_pte_at(mm, new_addr, dst, pte); 148 set_pte_at(mm, new_addr, dst, pte);
149 } else 149 } else
150 error = -ENOMEM; 150 error = -ENOMEM;
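
The mm/mremap.c hunk replaces the open-coded ZERO_PAGE fix-up with a move_pte() call, turning "adjust a PTE when it moves to a different virtual address" into a hook. Judging only from the call site, the hook takes the PTE, the destination protection and the old and new addresses, and returns the (possibly rewritten) PTE. A hypothetical generic fallback, an assumption rather than the actual header, would simply be a no-op:

/* Hypothetical generic fallback (inferred from the call site above,
 * not copied from a header): architectures whose PTE contents do not
 * depend on the virtual address leave the entry untouched. */
#ifndef move_pte
#define move_pte(pte, prot, old_addr, new_addr)  (pte)
#endif

An architecture with an address-dependent ZERO_PAGE would override this to re-derive the zero-page mapping for new_addr, which is what the removed inline code used to do unconditionally for every architecture.
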
diff --git a/mm/slab.c b/mm/slab.c
index 437d3388054b..5cbbdfa6dd0e 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -308,12 +308,12 @@ struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
308#define SIZE_L3 (1 + MAX_NUMNODES) 308#define SIZE_L3 (1 + MAX_NUMNODES)
309 309
310/* 310/*
311 * This function may be completely optimized away if 311 * This function must be completely optimized away if
312 * a constant is passed to it. Mostly the same as 312 * a constant is passed to it. Mostly the same as
313 * what is in linux/slab.h except it returns an 313 * what is in linux/slab.h except it returns an
314 * index. 314 * index.
315 */ 315 */
316static inline int index_of(const size_t size) 316static __always_inline int index_of(const size_t size)
317{ 317{
318 if (__builtin_constant_p(size)) { 318 if (__builtin_constant_p(size)) {
319 int i = 0; 319 int i = 0;
@@ -329,7 +329,8 @@ static inline int index_of(const size_t size)
329 extern void __bad_size(void); 329 extern void __bad_size(void);
330 __bad_size(); 330 __bad_size();
331 } 331 }
332 } 332 } else
333 BUG();
333 return 0; 334 return 0;
334} 335}
335 336
@@ -639,7 +640,7 @@ static enum {
639 640
640static DEFINE_PER_CPU(struct work_struct, reap_work); 641static DEFINE_PER_CPU(struct work_struct, reap_work);
641 642
642static void free_block(kmem_cache_t* cachep, void** objpp, int len); 643static void free_block(kmem_cache_t* cachep, void** objpp, int len, int node);
643static void enable_cpucache (kmem_cache_t *cachep); 644static void enable_cpucache (kmem_cache_t *cachep);
644static void cache_reap (void *unused); 645static void cache_reap (void *unused);
645static int __node_shrink(kmem_cache_t *cachep, int node); 646static int __node_shrink(kmem_cache_t *cachep, int node);
@@ -804,7 +805,7 @@ static inline void __drain_alien_cache(kmem_cache_t *cachep, struct array_cache
804 805
805 if (ac->avail) { 806 if (ac->avail) {
806 spin_lock(&rl3->list_lock); 807 spin_lock(&rl3->list_lock);
807 free_block(cachep, ac->entry, ac->avail); 808 free_block(cachep, ac->entry, ac->avail, node);
808 ac->avail = 0; 809 ac->avail = 0;
809 spin_unlock(&rl3->list_lock); 810 spin_unlock(&rl3->list_lock);
810 } 811 }
@@ -925,7 +926,7 @@ static int __devinit cpuup_callback(struct notifier_block *nfb,
925 /* Free limit for this kmem_list3 */ 926 /* Free limit for this kmem_list3 */
926 l3->free_limit -= cachep->batchcount; 927 l3->free_limit -= cachep->batchcount;
927 if (nc) 928 if (nc)
928 free_block(cachep, nc->entry, nc->avail); 929 free_block(cachep, nc->entry, nc->avail, node);
929 930
930 if (!cpus_empty(mask)) { 931 if (!cpus_empty(mask)) {
931 spin_unlock(&l3->list_lock); 932 spin_unlock(&l3->list_lock);
@@ -934,7 +935,7 @@ static int __devinit cpuup_callback(struct notifier_block *nfb,
934 935
935 if (l3->shared) { 936 if (l3->shared) {
936 free_block(cachep, l3->shared->entry, 937 free_block(cachep, l3->shared->entry,
937 l3->shared->avail); 938 l3->shared->avail, node);
938 kfree(l3->shared); 939 kfree(l3->shared);
939 l3->shared = NULL; 940 l3->shared = NULL;
940 } 941 }
@@ -1882,12 +1883,13 @@ static void do_drain(void *arg)
1882{ 1883{
1883 kmem_cache_t *cachep = (kmem_cache_t*)arg; 1884 kmem_cache_t *cachep = (kmem_cache_t*)arg;
1884 struct array_cache *ac; 1885 struct array_cache *ac;
1886 int node = numa_node_id();
1885 1887
1886 check_irq_off(); 1888 check_irq_off();
1887 ac = ac_data(cachep); 1889 ac = ac_data(cachep);
1888 spin_lock(&cachep->nodelists[numa_node_id()]->list_lock); 1890 spin_lock(&cachep->nodelists[node]->list_lock);
1889 free_block(cachep, ac->entry, ac->avail); 1891 free_block(cachep, ac->entry, ac->avail, node);
1890 spin_unlock(&cachep->nodelists[numa_node_id()]->list_lock); 1892 spin_unlock(&cachep->nodelists[node]->list_lock);
1891 ac->avail = 0; 1893 ac->avail = 0;
1892} 1894}
1893 1895
@@ -2508,16 +2510,12 @@ cache_alloc_debugcheck_after(kmem_cache_t *cachep,
2508#define cache_alloc_debugcheck_after(a,b,objp,d) (objp) 2510#define cache_alloc_debugcheck_after(a,b,objp,d) (objp)
2509#endif 2511#endif
2510 2512
2511 2513static inline void *____cache_alloc(kmem_cache_t *cachep, unsigned int __nocast flags)
2512static inline void *__cache_alloc(kmem_cache_t *cachep, unsigned int __nocast flags)
2513{ 2514{
2514 unsigned long save_flags;
2515 void* objp; 2515 void* objp;
2516 struct array_cache *ac; 2516 struct array_cache *ac;
2517 2517
2518 cache_alloc_debugcheck_before(cachep, flags); 2518 check_irq_off();
2519
2520 local_irq_save(save_flags);
2521 ac = ac_data(cachep); 2519 ac = ac_data(cachep);
2522 if (likely(ac->avail)) { 2520 if (likely(ac->avail)) {
2523 STATS_INC_ALLOCHIT(cachep); 2521 STATS_INC_ALLOCHIT(cachep);
@@ -2527,6 +2525,18 @@ static inline void *__cache_alloc(kmem_cache_t *cachep, unsigned int __nocast fl
2527 STATS_INC_ALLOCMISS(cachep); 2525 STATS_INC_ALLOCMISS(cachep);
2528 objp = cache_alloc_refill(cachep, flags); 2526 objp = cache_alloc_refill(cachep, flags);
2529 } 2527 }
2528 return objp;
2529}
2530
2531static inline void *__cache_alloc(kmem_cache_t *cachep, unsigned int __nocast flags)
2532{
2533 unsigned long save_flags;
2534 void* objp;
2535
2536 cache_alloc_debugcheck_before(cachep, flags);
2537
2538 local_irq_save(save_flags);
2539 objp = ____cache_alloc(cachep, flags);
2530 local_irq_restore(save_flags); 2540 local_irq_restore(save_flags);
2531 objp = cache_alloc_debugcheck_after(cachep, flags, objp, 2541 objp = cache_alloc_debugcheck_after(cachep, flags, objp,
2532 __builtin_return_address(0)); 2542 __builtin_return_address(0));
@@ -2608,7 +2618,7 @@ done:
2608/* 2618/*
2609 * Caller needs to acquire correct kmem_list's list_lock 2619 * Caller needs to acquire correct kmem_list's list_lock
2610 */ 2620 */
2611static void free_block(kmem_cache_t *cachep, void **objpp, int nr_objects) 2621static void free_block(kmem_cache_t *cachep, void **objpp, int nr_objects, int node)
2612{ 2622{
2613 int i; 2623 int i;
2614 struct kmem_list3 *l3; 2624 struct kmem_list3 *l3;
@@ -2617,14 +2627,12 @@ static void free_block(kmem_cache_t *cachep, void **objpp, int nr_objects)
2617 void *objp = objpp[i]; 2627 void *objp = objpp[i];
2618 struct slab *slabp; 2628 struct slab *slabp;
2619 unsigned int objnr; 2629 unsigned int objnr;
2620 int nodeid = 0;
2621 2630
2622 slabp = GET_PAGE_SLAB(virt_to_page(objp)); 2631 slabp = GET_PAGE_SLAB(virt_to_page(objp));
2623 nodeid = slabp->nodeid; 2632 l3 = cachep->nodelists[node];
2624 l3 = cachep->nodelists[nodeid];
2625 list_del(&slabp->list); 2633 list_del(&slabp->list);
2626 objnr = (objp - slabp->s_mem) / cachep->objsize; 2634 objnr = (objp - slabp->s_mem) / cachep->objsize;
2627 check_spinlock_acquired_node(cachep, nodeid); 2635 check_spinlock_acquired_node(cachep, node);
2628 check_slabp(cachep, slabp); 2636 check_slabp(cachep, slabp);
2629 2637
2630 2638
@@ -2664,13 +2672,14 @@ static void cache_flusharray(kmem_cache_t *cachep, struct array_cache *ac)
2664{ 2672{
2665 int batchcount; 2673 int batchcount;
2666 struct kmem_list3 *l3; 2674 struct kmem_list3 *l3;
2675 int node = numa_node_id();
2667 2676
2668 batchcount = ac->batchcount; 2677 batchcount = ac->batchcount;
2669#if DEBUG 2678#if DEBUG
2670 BUG_ON(!batchcount || batchcount > ac->avail); 2679 BUG_ON(!batchcount || batchcount > ac->avail);
2671#endif 2680#endif
2672 check_irq_off(); 2681 check_irq_off();
2673 l3 = cachep->nodelists[numa_node_id()]; 2682 l3 = cachep->nodelists[node];
2674 spin_lock(&l3->list_lock); 2683 spin_lock(&l3->list_lock);
2675 if (l3->shared) { 2684 if (l3->shared) {
2676 struct array_cache *shared_array = l3->shared; 2685 struct array_cache *shared_array = l3->shared;
@@ -2686,7 +2695,7 @@ static void cache_flusharray(kmem_cache_t *cachep, struct array_cache *ac)
2686 } 2695 }
2687 } 2696 }
2688 2697
2689 free_block(cachep, ac->entry, batchcount); 2698 free_block(cachep, ac->entry, batchcount, node);
2690free_done: 2699free_done:
2691#if STATS 2700#if STATS
2692 { 2701 {
@@ -2751,7 +2760,7 @@ static inline void __cache_free(kmem_cache_t *cachep, void *objp)
2751 } else { 2760 } else {
2752 spin_lock(&(cachep->nodelists[nodeid])-> 2761 spin_lock(&(cachep->nodelists[nodeid])->
2753 list_lock); 2762 list_lock);
2754 free_block(cachep, &objp, 1); 2763 free_block(cachep, &objp, 1, nodeid);
2755 spin_unlock(&(cachep->nodelists[nodeid])-> 2764 spin_unlock(&(cachep->nodelists[nodeid])->
2756 list_lock); 2765 list_lock);
2757 } 2766 }
@@ -2844,7 +2853,7 @@ void *kmem_cache_alloc_node(kmem_cache_t *cachep, unsigned int __nocast flags, i
2844 unsigned long save_flags; 2853 unsigned long save_flags;
2845 void *ptr; 2854 void *ptr;
2846 2855
2847 if (nodeid == numa_node_id() || nodeid == -1) 2856 if (nodeid == -1)
2848 return __cache_alloc(cachep, flags); 2857 return __cache_alloc(cachep, flags);
2849 2858
2850 if (unlikely(!cachep->nodelists[nodeid])) { 2859 if (unlikely(!cachep->nodelists[nodeid])) {
@@ -2855,7 +2864,10 @@ void *kmem_cache_alloc_node(kmem_cache_t *cachep, unsigned int __nocast flags, i
2855 2864
2856 cache_alloc_debugcheck_before(cachep, flags); 2865 cache_alloc_debugcheck_before(cachep, flags);
2857 local_irq_save(save_flags); 2866 local_irq_save(save_flags);
2858 ptr = __cache_alloc_node(cachep, flags, nodeid); 2867 if (nodeid == numa_node_id())
2868 ptr = ____cache_alloc(cachep, flags);
2869 else
2870 ptr = __cache_alloc_node(cachep, flags, nodeid);
2859 local_irq_restore(save_flags); 2871 local_irq_restore(save_flags);
2860 ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, __builtin_return_address(0)); 2872 ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, __builtin_return_address(0));
2861 2873
@@ -3079,7 +3091,7 @@ static int alloc_kmemlist(kmem_cache_t *cachep)
3079 3091
3080 if ((nc = cachep->nodelists[node]->shared)) 3092 if ((nc = cachep->nodelists[node]->shared))
3081 free_block(cachep, nc->entry, 3093 free_block(cachep, nc->entry,
3082 nc->avail); 3094 nc->avail, node);
3083 3095
3084 l3->shared = new; 3096 l3->shared = new;
3085 if (!cachep->nodelists[node]->alien) { 3097 if (!cachep->nodelists[node]->alien) {
@@ -3160,7 +3172,7 @@ static int do_tune_cpucache(kmem_cache_t *cachep, int limit, int batchcount,
3160 if (!ccold) 3172 if (!ccold)
3161 continue; 3173 continue;
3162 spin_lock_irq(&cachep->nodelists[cpu_to_node(i)]->list_lock); 3174 spin_lock_irq(&cachep->nodelists[cpu_to_node(i)]->list_lock);
3163 free_block(cachep, ccold->entry, ccold->avail); 3175 free_block(cachep, ccold->entry, ccold->avail, cpu_to_node(i));
3164 spin_unlock_irq(&cachep->nodelists[cpu_to_node(i)]->list_lock); 3176 spin_unlock_irq(&cachep->nodelists[cpu_to_node(i)]->list_lock);
3165 kfree(ccold); 3177 kfree(ccold);
3166 } 3178 }
@@ -3240,7 +3252,7 @@ static void drain_array_locked(kmem_cache_t *cachep,
3240 if (tofree > ac->avail) { 3252 if (tofree > ac->avail) {
3241 tofree = (ac->avail+1)/2; 3253 tofree = (ac->avail+1)/2;
3242 } 3254 }
3243 free_block(cachep, ac->entry, tofree); 3255 free_block(cachep, ac->entry, tofree, node);
3244 ac->avail -= tofree; 3256 ac->avail -= tofree;
3245 memmove(ac->entry, &(ac->entry[tofree]), 3257 memmove(ac->entry, &(ac->entry[tofree]),
3246 sizeof(void*)*ac->avail); 3258 sizeof(void*)*ac->avail);
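
Two things happen in the mm/slab.c hunks: free_block() now takes the NUMA node whose lists it manipulates as an explicit argument (callers pass numa_node_id() or the node they are draining), and the allocation fast path is split out of __cache_alloc() into ____cache_alloc(), which assumes interrupts are already disabled, so kmem_cache_alloc_node() can reuse it for node-local requests. A sketch of that inner/outer split; the struct and the two helpers are placeholders, not the kernel's names:

struct cache;                                                       /* placeholder */
void *pop_or_refill(struct cache *c, unsigned int flags);           /* placeholder */
void *alloc_from_remote_node(struct cache *c, unsigned int flags, int nid); /* placeholder */

/* Inner helper: caller guarantees interrupts are off; fast path only. */
static inline void *cache_alloc_inner(struct cache *c, unsigned int flags)
{
        return pop_or_refill(c, flags);
}

/* Outer wrapper: does the irq bracketing (and, in the real code, the
 * debug hooks) around the inner helper. */
static void *cache_alloc(struct cache *c, unsigned int flags)
{
        unsigned long irqflags;
        void *obj;

        local_irq_save(irqflags);
        obj = cache_alloc_inner(c, flags);
        local_irq_restore(irqflags);
        return obj;
}

static void *cache_alloc_node(struct cache *c, unsigned int flags, int nid)
{
        unsigned long irqflags;
        void *obj;

        local_irq_save(irqflags);
        if (nid == numa_node_id())              /* local node: reuse the fast path */
                obj = cache_alloc_inner(c, flags);
        else
                obj = alloc_from_remote_node(c, flags, nid);
        local_irq_restore(irqflags);
        return obj;
}
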
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 0184f510aace..1dcaeda039f4 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1381,6 +1381,7 @@ asmlinkage long sys_swapon(const char __user * specialfile, int swap_flags)
1381 error = bd_claim(bdev, sys_swapon); 1381 error = bd_claim(bdev, sys_swapon);
1382 if (error < 0) { 1382 if (error < 0) {
1383 bdev = NULL; 1383 bdev = NULL;
1384 error = -EINVAL;
1384 goto bad_swap; 1385 goto bad_swap;
1385 } 1386 }
1386 p->old_block_size = block_size(bdev); 1387 p->old_block_size = block_size(bdev);
diff --git a/net/802/p8022.c b/net/802/p8022.c
index b24817c63ca8..2530f35241cd 100644
--- a/net/802/p8022.c
+++ b/net/802/p8022.c
@@ -56,7 +56,7 @@ struct datalink_proto *register_8022_client(unsigned char type,
56 56
57void unregister_8022_client(struct datalink_proto *proto) 57void unregister_8022_client(struct datalink_proto *proto)
58{ 58{
59 llc_sap_close(proto->sap); 59 llc_sap_put(proto->sap);
60 kfree(proto); 60 kfree(proto);
61} 61}
62 62
diff --git a/net/802/psnap.c b/net/802/psnap.c
index ab80b1fab53c..4d638944d933 100644
--- a/net/802/psnap.c
+++ b/net/802/psnap.c
@@ -106,7 +106,7 @@ module_init(snap_init);
106 106
107static void __exit snap_exit(void) 107static void __exit snap_exit(void)
108{ 108{
109 llc_sap_close(snap_sap); 109 llc_sap_put(snap_sap);
110} 110}
111 111
112module_exit(snap_exit); 112module_exit(snap_exit);
diff --git a/net/802/tr.c b/net/802/tr.c
index 1bb7dc1b85cd..1eaa3d19d8bf 100644
--- a/net/802/tr.c
+++ b/net/802/tr.c
@@ -238,7 +238,7 @@ unsigned short tr_type_trans(struct sk_buff *skb, struct net_device *dev)
238 return trllc->ethertype; 238 return trllc->ethertype;
239 } 239 }
240 240
241 return ntohs(ETH_P_802_2); 241 return ntohs(ETH_P_TR_802_2);
242} 242}
243 243
244/* 244/*
diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
index 1d31b3a3f1e5..7982656b9c83 100644
--- a/net/appletalk/ddp.c
+++ b/net/appletalk/ddp.c
@@ -100,8 +100,7 @@ static struct sock *atalk_search_socket(struct sockaddr_at *to,
100 continue; 100 continue;
101 101
102 if (to->sat_addr.s_net == ATADDR_ANYNET && 102 if (to->sat_addr.s_net == ATADDR_ANYNET &&
103 to->sat_addr.s_node == ATADDR_BCAST && 103 to->sat_addr.s_node == ATADDR_BCAST)
104 at->src_net == atif->address.s_net)
105 goto found; 104 goto found;
106 105
107 if (to->sat_addr.s_net == at->src_net && 106 if (to->sat_addr.s_net == at->src_net &&
@@ -1443,8 +1442,10 @@ static int atalk_rcv(struct sk_buff *skb, struct net_device *dev,
1443 else 1442 else
1444 atif = atalk_find_interface(ddp->deh_dnet, ddp->deh_dnode); 1443 atif = atalk_find_interface(ddp->deh_dnet, ddp->deh_dnode);
1445 1444
1446 /* Not ours, so we route the packet via the correct AppleTalk iface */
1447 if (!atif) { 1445 if (!atif) {
1446 /* Not ours, so we route the packet via the correct
1447 * AppleTalk iface
1448 */
1448 atalk_route_packet(skb, dev, ddp, &ddphv, origlen); 1449 atalk_route_packet(skb, dev, ddp, &ddphv, origlen);
1449 goto out; 1450 goto out;
1450 } 1451 }
@@ -1592,9 +1593,6 @@ static int atalk_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr
1592 1593
1593 if (usat->sat_addr.s_net || usat->sat_addr.s_node == ATADDR_ANYNODE) { 1594 if (usat->sat_addr.s_net || usat->sat_addr.s_node == ATADDR_ANYNODE) {
1594 rt = atrtr_find(&usat->sat_addr); 1595 rt = atrtr_find(&usat->sat_addr);
1595 if (!rt)
1596 return -ENETUNREACH;
1597
1598 dev = rt->dev; 1596 dev = rt->dev;
1599 } else { 1597 } else {
1600 struct atalk_addr at_hint; 1598 struct atalk_addr at_hint;
@@ -1603,11 +1601,12 @@ static int atalk_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr
1603 at_hint.s_net = at->src_net; 1601 at_hint.s_net = at->src_net;
1604 1602
1605 rt = atrtr_find(&at_hint); 1603 rt = atrtr_find(&at_hint);
1606 if (!rt)
1607 return -ENETUNREACH;
1608
1609 dev = rt->dev; 1604 dev = rt->dev;
1610 } 1605 }
1606 if (!rt)
1607 return -ENETUNREACH;
1608
1609 dev = rt->dev;
1611 1610
1612 SOCK_DEBUG(sk, "SK %p: Size needed %d, device %s\n", 1611 SOCK_DEBUG(sk, "SK %p: Size needed %d, device %s\n",
1613 sk, size, dev->name); 1612 sk, size, dev->name);
@@ -1677,6 +1676,20 @@ static int atalk_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr
1677 SOCK_DEBUG(sk, "SK %p: Loop back.\n", sk); 1676 SOCK_DEBUG(sk, "SK %p: Loop back.\n", sk);
1678 /* loop back */ 1677 /* loop back */
1679 skb_orphan(skb); 1678 skb_orphan(skb);
1679 if (ddp->deh_dnode == ATADDR_BCAST) {
1680 struct atalk_addr at_lo;
1681
1682 at_lo.s_node = 0;
1683 at_lo.s_net = 0;
1684
1685 rt = atrtr_find(&at_lo);
1686 if (!rt) {
1687 kfree_skb(skb);
1688 return -ENETUNREACH;
1689 }
1690 dev = rt->dev;
1691 skb->dev = dev;
1692 }
1680 ddp_dl->request(ddp_dl, skb, dev->dev_addr); 1693 ddp_dl->request(ddp_dl, skb, dev->dev_addr);
1681 } else { 1694 } else {
1682 SOCK_DEBUG(sk, "SK %p: send out.\n", sk); 1695 SOCK_DEBUG(sk, "SK %p: send out.\n", sk);
diff --git a/net/atm/addr.c b/net/atm/addr.c
index 1c8867f7f54a..a30d0bf48063 100644
--- a/net/atm/addr.c
+++ b/net/atm/addr.c
@@ -50,8 +50,10 @@ void atm_reset_addr(struct atm_dev *dev)
50 struct atm_dev_addr *this, *p; 50 struct atm_dev_addr *this, *p;
51 51
52 spin_lock_irqsave(&dev->lock, flags); 52 spin_lock_irqsave(&dev->lock, flags);
53 list_for_each_entry_safe(this, p, &dev->local, entry) 53 list_for_each_entry_safe(this, p, &dev->local, entry) {
54 kfree(this); 54 list_del(&this->entry);
55 kfree(this);
56 }
55 spin_unlock_irqrestore(&dev->lock, flags); 57 spin_unlock_irqrestore(&dev->lock, flags);
56 notify_sigd(dev); 58 notify_sigd(dev);
57} 59}
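
The net/atm/addr.c fix is the classic "free everything on a list" pattern: the _safe iterator caches the next pointer so entries may be freed inside the loop, but each entry still has to be unlinked before kfree(), otherwise the list head keeps pointing at freed memory after the loop. In isolation, with an illustrative entry type:

#include <linux/list.h>
#include <linux/slab.h>

struct addr_entry {                      /* illustrative type */
        struct list_head entry;
};

static void flush_addr_list(struct list_head *head)
{
        struct addr_entry *this, *next;

        list_for_each_entry_safe(this, next, head, entry) {
                list_del(&this->entry);  /* unlink first */
                kfree(this);             /* then free */
        }
        /* *head is now a valid, empty list */
}
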
diff --git a/net/atm/clip.c b/net/atm/clip.c
index 28dab55a4387..4f54c9a5e84a 100644
--- a/net/atm/clip.c
+++ b/net/atm/clip.c
@@ -310,7 +310,7 @@ static int clip_constructor(struct neighbour *neigh)
310 if (neigh->type != RTN_UNICAST) return -EINVAL; 310 if (neigh->type != RTN_UNICAST) return -EINVAL;
311 311
312 rcu_read_lock(); 312 rcu_read_lock();
313 in_dev = rcu_dereference(__in_dev_get(dev)); 313 in_dev = __in_dev_get_rcu(dev);
314 if (!in_dev) { 314 if (!in_dev) {
315 rcu_read_unlock(); 315 rcu_read_unlock();
316 return -EINVAL; 316 return -EINVAL;
diff --git a/net/atm/common.c b/net/atm/common.c
index e93e838069e8..63feea49fb13 100644
--- a/net/atm/common.c
+++ b/net/atm/common.c
@@ -46,7 +46,7 @@ static void __vcc_insert_socket(struct sock *sk)
46 struct atm_vcc *vcc = atm_sk(sk); 46 struct atm_vcc *vcc = atm_sk(sk);
47 struct hlist_head *head = &vcc_hash[vcc->vci & 47 struct hlist_head *head = &vcc_hash[vcc->vci &
48 (VCC_HTABLE_SIZE - 1)]; 48 (VCC_HTABLE_SIZE - 1)];
49 sk->sk_hashent = vcc->vci & (VCC_HTABLE_SIZE - 1); 49 sk->sk_hash = vcc->vci & (VCC_HTABLE_SIZE - 1);
50 sk_add_node(sk, head); 50 sk_add_node(sk, head);
51} 51}
52 52
@@ -178,8 +178,6 @@ static void vcc_destroy_socket(struct sock *sk)
178 if (vcc->push) 178 if (vcc->push)
179 vcc->push(vcc, NULL); /* atmarpd has no push */ 179 vcc->push(vcc, NULL); /* atmarpd has no push */
180 180
181 vcc_remove_socket(sk); /* no more receive */
182
183 while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) { 181 while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
184 atm_return(vcc,skb->truesize); 182 atm_return(vcc,skb->truesize);
185 kfree_skb(skb); 183 kfree_skb(skb);
@@ -188,6 +186,8 @@ static void vcc_destroy_socket(struct sock *sk)
188 module_put(vcc->dev->ops->owner); 186 module_put(vcc->dev->ops->owner);
189 atm_dev_put(vcc->dev); 187 atm_dev_put(vcc->dev);
190 } 188 }
189
190 vcc_remove_socket(sk);
191} 191}
192 192
193 193
diff --git a/net/atm/ioctl.c b/net/atm/ioctl.c
index d89056ec44d4..a150198b05a3 100644
--- a/net/atm/ioctl.c
+++ b/net/atm/ioctl.c
@@ -105,17 +105,35 @@ int vcc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
105 if (!error) 105 if (!error)
106 sock->state = SS_CONNECTED; 106 sock->state = SS_CONNECTED;
107 goto done; 107 goto done;
108 default: 108 case ATM_SETBACKEND:
109 case ATM_NEWBACKENDIF:
110 {
111 atm_backend_t backend;
112 error = get_user(backend, (atm_backend_t __user *) argp);
113 if (error)
114 goto done;
115 switch (backend) {
116 case ATM_BACKEND_PPP:
117 request_module("pppoatm");
118 break;
119 case ATM_BACKEND_BR2684:
120 request_module("br2684");
121 break;
122 }
123 }
124 break;
125 case ATMMPC_CTRL:
126 case ATMMPC_DATA:
127 request_module("mpoa");
128 break;
129 case ATMARPD_CTRL:
130 request_module("clip");
131 break;
132 case ATMLEC_CTRL:
133 request_module("lec");
109 break; 134 break;
110 } 135 }
111 136
112 if (cmd == ATMMPC_CTRL || cmd == ATMMPC_DATA)
113 request_module("mpoa");
114 if (cmd == ATMARPD_CTRL)
115 request_module("clip");
116 if (cmd == ATMLEC_CTRL)
117 request_module("lec");
118
119 error = -ENOIOCTLCMD; 137 error = -ENOIOCTLCMD;
120 138
121 down(&ioctl_mutex); 139 down(&ioctl_mutex);
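
The net/atm/ioctl.c hunk replaces the trailing if-chain with an explicit switch: commands that imply a particular backend load the matching module on demand with request_module(), and ATM_SETBACKEND/ATM_NEWBACKENDIF first read the requested backend identifier from userspace. Condensed into one helper (a restatement of the dispatch above, not the full handler; atm_backend_t and the constants come from the same headers the file already uses):

static int load_backend_for_cmd(unsigned int cmd, void __user *argp)
{
        atm_backend_t backend;
        int err;

        switch (cmd) {
        case ATM_SETBACKEND:
        case ATM_NEWBACKENDIF:
                err = get_user(backend, (atm_backend_t __user *)argp);
                if (err)
                        return err;
                if (backend == ATM_BACKEND_PPP)
                        request_module("pppoatm");
                else if (backend == ATM_BACKEND_BR2684)
                        request_module("br2684");
                break;
        case ATMMPC_CTRL:
        case ATMMPC_DATA:
                request_module("mpoa");
                break;
        case ATMARPD_CTRL:
                request_module("clip");
                break;
        case ATMLEC_CTRL:
                request_module("lec");
                break;
        }
        return 0;
}
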
diff --git a/net/atm/lec.c b/net/atm/lec.c
index a0752487026d..ad840b9afba8 100644
--- a/net/atm/lec.c
+++ b/net/atm/lec.c
@@ -686,9 +686,19 @@ static unsigned char lec_ctrl_magic[] = {
686 0x01, 686 0x01,
687 0x01 }; 687 0x01 };
688 688
689#define LEC_DATA_DIRECT_8023 2
690#define LEC_DATA_DIRECT_8025 3
691
692static int lec_is_data_direct(struct atm_vcc *vcc)
693{
694 return ((vcc->sap.blli[0].l3.tr9577.snap[4] == LEC_DATA_DIRECT_8023) ||
695 (vcc->sap.blli[0].l3.tr9577.snap[4] == LEC_DATA_DIRECT_8025));
696}
697
689static void 698static void
690lec_push(struct atm_vcc *vcc, struct sk_buff *skb) 699lec_push(struct atm_vcc *vcc, struct sk_buff *skb)
691{ 700{
701 unsigned long flags;
692 struct net_device *dev = (struct net_device *)vcc->proto_data; 702 struct net_device *dev = (struct net_device *)vcc->proto_data;
693 struct lec_priv *priv = (struct lec_priv *)dev->priv; 703 struct lec_priv *priv = (struct lec_priv *)dev->priv;
694 704
@@ -728,7 +738,8 @@ lec_push(struct atm_vcc *vcc, struct sk_buff *skb)
728 skb_queue_tail(&sk->sk_receive_queue, skb); 738 skb_queue_tail(&sk->sk_receive_queue, skb);
729 sk->sk_data_ready(sk, skb->len); 739 sk->sk_data_ready(sk, skb->len);
730 } else { /* Data frame, queue to protocol handlers */ 740 } else { /* Data frame, queue to protocol handlers */
731 unsigned char *dst; 741 struct lec_arp_table *entry;
742 unsigned char *src, *dst;
732 743
733 atm_return(vcc,skb->truesize); 744 atm_return(vcc,skb->truesize);
734 if (*(uint16_t *)skb->data == htons(priv->lecid) || 745 if (*(uint16_t *)skb->data == htons(priv->lecid) ||
@@ -741,10 +752,30 @@ lec_push(struct atm_vcc *vcc, struct sk_buff *skb)
741 return; 752 return;
742 } 753 }
743#ifdef CONFIG_TR 754#ifdef CONFIG_TR
744 if (priv->is_trdev) dst = ((struct lecdatahdr_8025 *)skb->data)->h_dest; 755 if (priv->is_trdev)
756 dst = ((struct lecdatahdr_8025 *) skb->data)->h_dest;
745 else 757 else
746#endif 758#endif
747 dst = ((struct lecdatahdr_8023 *)skb->data)->h_dest; 759 dst = ((struct lecdatahdr_8023 *) skb->data)->h_dest;
760
761 /* If this is a Data Direct VCC, and the VCC does not match
762 * the LE_ARP cache entry, delete the LE_ARP cache entry.
763 */
764 spin_lock_irqsave(&priv->lec_arp_lock, flags);
765 if (lec_is_data_direct(vcc)) {
766#ifdef CONFIG_TR
767 if (priv->is_trdev)
768 src = ((struct lecdatahdr_8025 *) skb->data)->h_source;
769 else
770#endif
771 src = ((struct lecdatahdr_8023 *) skb->data)->h_source;
772 entry = lec_arp_find(priv, src);
773 if (entry && entry->vcc != vcc) {
774 lec_arp_remove(priv, entry);
775 kfree(entry);
776 }
777 }
778 spin_unlock_irqrestore(&priv->lec_arp_lock, flags);
748 779
749 if (!(dst[0]&0x01) && /* Never filter Multi/Broadcast */ 780 if (!(dst[0]&0x01) && /* Never filter Multi/Broadcast */
750 !priv->is_proxy && /* Proxy wants all the packets */ 781 !priv->is_proxy && /* Proxy wants all the packets */
@@ -1990,6 +2021,12 @@ lec_arp_resolve(struct lec_priv *priv, unsigned char *mac_to_find,
1990 found = entry->vcc; 2021 found = entry->vcc;
1991 goto out; 2022 goto out;
1992 } 2023 }
2024 /* If the LE_ARP cache entry is still pending, reset count to 0
2025 * so another LE_ARP request can be made for this frame.
2026 */
2027 if (entry->status == ESI_ARP_PENDING) {
2028 entry->no_tries = 0;
2029 }
1993 /* Data direct VC not yet set up, check to see if the unknown 2030 /* Data direct VC not yet set up, check to see if the unknown
1994 frame count is greater than the limit. If the limit has 2031 frame count is greater than the limit. If the limit has
1995 not been reached, allow the caller to send packet to 2032 not been reached, allow the caller to send packet to
diff --git a/net/atm/signaling.c b/net/atm/signaling.c
index f7c449ac1800..e7211a7f382c 100644
--- a/net/atm/signaling.c
+++ b/net/atm/signaling.c
@@ -217,8 +217,9 @@ void sigd_enq(struct atm_vcc *vcc,enum atmsvc_msg_type type,
217static void purge_vcc(struct atm_vcc *vcc) 217static void purge_vcc(struct atm_vcc *vcc)
218{ 218{
219 if (sk_atm(vcc)->sk_family == PF_ATMSVC && 219 if (sk_atm(vcc)->sk_family == PF_ATMSVC &&
220 !test_bit(ATM_VF_META,&vcc->flags)) { 220 !test_bit(ATM_VF_META, &vcc->flags)) {
221 set_bit(ATM_VF_RELEASED,&vcc->flags); 221 set_bit(ATM_VF_RELEASED, &vcc->flags);
222 clear_bit(ATM_VF_REGIS, &vcc->flags);
222 vcc_release_async(vcc, -EUNATCH); 223 vcc_release_async(vcc, -EUNATCH);
223 } 224 }
224} 225}
@@ -243,8 +244,7 @@ static void sigd_close(struct atm_vcc *vcc)
243 sk_for_each(s, node, head) { 244 sk_for_each(s, node, head) {
244 struct atm_vcc *vcc = atm_sk(s); 245 struct atm_vcc *vcc = atm_sk(s);
245 246
246 if (vcc->dev) 247 purge_vcc(vcc);
247 purge_vcc(vcc);
248 } 248 }
249 } 249 }
250 read_unlock(&vcc_sklist_lock); 250 read_unlock(&vcc_sklist_lock);
diff --git a/net/atm/svc.c b/net/atm/svc.c
index 08e46052a3e4..d7b266136bf6 100644
--- a/net/atm/svc.c
+++ b/net/atm/svc.c
@@ -302,6 +302,7 @@ static int svc_listen(struct socket *sock,int backlog)
302 error = -EINVAL; 302 error = -EINVAL;
303 goto out; 303 goto out;
304 } 304 }
305 vcc_insert_socket(sk);
305 set_bit(ATM_VF_WAITING, &vcc->flags); 306 set_bit(ATM_VF_WAITING, &vcc->flags);
306 prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE); 307 prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE);
307 sigd_enq(vcc,as_listen,NULL,NULL,&vcc->local); 308 sigd_enq(vcc,as_listen,NULL,NULL,&vcc->local);
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
index 069253f830c1..2d24fb400e0c 100644
--- a/net/bridge/br_forward.c
+++ b/net/bridge/br_forward.c
@@ -31,7 +31,8 @@ static inline int should_deliver(const struct net_bridge_port *p,
31 31
32int br_dev_queue_push_xmit(struct sk_buff *skb) 32int br_dev_queue_push_xmit(struct sk_buff *skb)
33{ 33{
34 if (skb->len > skb->dev->mtu) 34 /* drop mtu oversized packets except tso */
35 if (skb->len > skb->dev->mtu && !skb_shinfo(skb)->tso_size)
35 kfree_skb(skb); 36 kfree_skb(skb);
36 else { 37 else {
37#ifdef CONFIG_BRIDGE_NETFILTER 38#ifdef CONFIG_BRIDGE_NETFILTER
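
The one-line bridge change keeps the MTU check from discarding TSO frames: a segmentation-offload skb may legitimately exceed the device MTU because the hardware will split it into MTU-sized segments. The check in isolation (tso_size is the field name of this era, later renamed gso_size; the xmit helper is a placeholder for the netfilter/xmit path that follows):

if (skb->len > skb->dev->mtu && !skb_shinfo(skb)->tso_size)
        kfree_skb(skb);                 /* oversized and not offloaded: drop */
else
        queue_for_xmit(skb);            /* placeholder */
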
diff --git a/net/core/datagram.c b/net/core/datagram.c
index da9bf71421a7..81987df536eb 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -211,74 +211,45 @@ void skb_free_datagram(struct sock *sk, struct sk_buff *skb)
211int skb_copy_datagram_iovec(const struct sk_buff *skb, int offset, 211int skb_copy_datagram_iovec(const struct sk_buff *skb, int offset,
212 struct iovec *to, int len) 212 struct iovec *to, int len)
213{ 213{
214 int start = skb_headlen(skb); 214 int i, err, fraglen, end = 0;
215 int i, copy = start - offset; 215 struct sk_buff *next = skb_shinfo(skb)->frag_list;
216 216next_skb:
217 /* Copy header. */ 217 fraglen = skb_headlen(skb);
218 if (copy > 0) { 218 i = -1;
219 if (copy > len)
220 copy = len;
221 if (memcpy_toiovec(to, skb->data + offset, copy))
222 goto fault;
223 if ((len -= copy) == 0)
224 return 0;
225 offset += copy;
226 }
227 219
228 /* Copy paged appendix. Hmm... why does this look so complicated? */ 220 while (1) {
229 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 221 int start = end;
230 int end;
231
232 BUG_TRAP(start <= offset + len);
233 222
234 end = start + skb_shinfo(skb)->frags[i].size; 223 if ((end += fraglen) > offset) {
235 if ((copy = end - offset) > 0) { 224 int copy = end - offset, o = offset - start;
236 int err;
237 u8 *vaddr;
238 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
239 struct page *page = frag->page;
240 225
241 if (copy > len) 226 if (copy > len)
242 copy = len; 227 copy = len;
243 vaddr = kmap(page); 228 if (i == -1)
244 err = memcpy_toiovec(to, vaddr + frag->page_offset + 229 err = memcpy_toiovec(to, skb->data + o, copy);
245 offset - start, copy); 230 else {
246 kunmap(page); 231 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
232 struct page *page = frag->page;
233 void *p = kmap(page) + frag->page_offset + o;
234 err = memcpy_toiovec(to, p, copy);
235 kunmap(page);
236 }
247 if (err) 237 if (err)
248 goto fault; 238 goto fault;
249 if (!(len -= copy)) 239 if (!(len -= copy))
250 return 0; 240 return 0;
251 offset += copy; 241 offset += copy;
252 } 242 }
253 start = end; 243 if (++i >= skb_shinfo(skb)->nr_frags)
244 break;
245 fraglen = skb_shinfo(skb)->frags[i].size;
254 } 246 }
255 247 if (next) {
256 if (skb_shinfo(skb)->frag_list) { 248 skb = next;
257 struct sk_buff *list = skb_shinfo(skb)->frag_list; 249 BUG_ON(skb_shinfo(skb)->frag_list);
258 250 next = skb->next;
259 for (; list; list = list->next) { 251 goto next_skb;
260 int end;
261
262 BUG_TRAP(start <= offset + len);
263
264 end = start + list->len;
265 if ((copy = end - offset) > 0) {
266 if (copy > len)
267 copy = len;
268 if (skb_copy_datagram_iovec(list,
269 offset - start,
270 to, copy))
271 goto fault;
272 if ((len -= copy) == 0)
273 return 0;
274 offset += copy;
275 }
276 start = end;
277 }
278 } 252 }
279 if (!len)
280 return 0;
281
282fault: 253fault:
283 return -EFAULT; 254 return -EFAULT;
284} 255}
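
The skb_copy_datagram_iovec() rewrite drops the separate "copy the header, then each page fragment, then recurse into frag_list" stages in favour of one loop that slides a running [start, end) window across the linear area and the fragments, and walks the frag_list iteratively instead of recursively. The window technique itself is easy to show in plain C over an array of variable-length segments:

#include <stdio.h>
#include <string.h>

struct seg { const char *data; int len; };

/* Copy "len" bytes starting at "offset" out of a sequence of
 * variable-length segments, using the same running-window idea as the
 * rewritten copy loop above. */
static int copy_range(const struct seg *segs, int nsegs,
                      int offset, char *to, int len)
{
        int end = 0;

        for (int i = 0; i < nsegs && len > 0; i++) {
                int start = end;

                end += segs[i].len;
                if (end > offset) {              /* segment overlaps the range */
                        int copy = end - offset;
                        int o = offset - start;  /* first wanted byte in segment */

                        if (copy > len)
                                copy = len;
                        memcpy(to, segs[i].data + o, copy);
                        to += copy;
                        offset += copy;
                        len -= copy;
                }
        }
        return len ? -1 : 0;                     /* -1: range ran past the data */
}

int main(void)
{
        const struct seg segs[] = {
                { "hello ", 6 }, { "datagram ", 9 }, { "world", 5 }
        };
        char buf[16] = { 0 };

        copy_range(segs, 3, 4, buf, 12);
        printf("%s\n", buf);                     /* prints "o datagram w" */
        return 0;
}
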
diff --git a/net/core/dev.c b/net/core/dev.c
index c01511e3d0c1..9066c874e273 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -574,6 +574,8 @@ struct net_device *dev_getbyhwaddr(unsigned short type, char *ha)
574 return dev; 574 return dev;
575} 575}
576 576
577EXPORT_SYMBOL(dev_getbyhwaddr);
578
577struct net_device *dev_getfirstbyhwtype(unsigned short type) 579struct net_device *dev_getfirstbyhwtype(unsigned short type)
578{ 580{
579 struct net_device *dev; 581 struct net_device *dev;
@@ -1257,6 +1259,8 @@ int dev_queue_xmit(struct sk_buff *skb)
1257 if (skb_checksum_help(skb, 0)) 1259 if (skb_checksum_help(skb, 0))
1258 goto out_kfree_skb; 1260 goto out_kfree_skb;
1259 1261
1262 spin_lock_prefetch(&dev->queue_lock);
1263
1260 /* Disable soft irqs for various locks below. Also 1264 /* Disable soft irqs for various locks below. Also
1261 * stops preemption for RCU. 1265 * stops preemption for RCU.
1262 */ 1266 */
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 39fc55edf691..4128fc76ac3a 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -61,7 +61,9 @@ static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev);
61void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev); 61void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev);
62 62
63static struct neigh_table *neigh_tables; 63static struct neigh_table *neigh_tables;
64#ifdef CONFIG_PROC_FS
64static struct file_operations neigh_stat_seq_fops; 65static struct file_operations neigh_stat_seq_fops;
66#endif
65 67
66/* 68/*
67 Neighbour hash table buckets are protected with rwlock tbl->lock. 69 Neighbour hash table buckets are protected with rwlock tbl->lock.
@@ -725,6 +727,13 @@ static __inline__ int neigh_max_probes(struct neighbour *n)
725 p->ucast_probes + p->app_probes + p->mcast_probes); 727 p->ucast_probes + p->app_probes + p->mcast_probes);
726} 728}
727 729
730static inline void neigh_add_timer(struct neighbour *n, unsigned long when)
731{
732 if (unlikely(mod_timer(&n->timer, when))) {
733 printk("NEIGH: BUG, double timer add, state is %x\n",
734 n->nud_state);
735 }
736}
728 737
729/* Called when a timer expires for a neighbour entry. */ 738/* Called when a timer expires for a neighbour entry. */
730 739
@@ -809,8 +818,7 @@ static void neigh_timer_handler(unsigned long arg)
809 neigh_hold(neigh); 818 neigh_hold(neigh);
810 if (time_before(next, jiffies + HZ/2)) 819 if (time_before(next, jiffies + HZ/2))
811 next = jiffies + HZ/2; 820 next = jiffies + HZ/2;
812 neigh->timer.expires = next; 821 neigh_add_timer(neigh, next);
813 add_timer(&neigh->timer);
814 } 822 }
815 if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) { 823 if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
816 struct sk_buff *skb = skb_peek(&neigh->arp_queue); 824 struct sk_buff *skb = skb_peek(&neigh->arp_queue);
@@ -852,8 +860,7 @@ int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
852 atomic_set(&neigh->probes, neigh->parms->ucast_probes); 860 atomic_set(&neigh->probes, neigh->parms->ucast_probes);
853 neigh->nud_state = NUD_INCOMPLETE; 861 neigh->nud_state = NUD_INCOMPLETE;
854 neigh_hold(neigh); 862 neigh_hold(neigh);
855 neigh->timer.expires = now + 1; 863 neigh_add_timer(neigh, now + 1);
856 add_timer(&neigh->timer);
857 } else { 864 } else {
858 neigh->nud_state = NUD_FAILED; 865 neigh->nud_state = NUD_FAILED;
859 write_unlock_bh(&neigh->lock); 866 write_unlock_bh(&neigh->lock);
@@ -866,8 +873,8 @@ int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
866 NEIGH_PRINTK2("neigh %p is delayed.\n", neigh); 873 NEIGH_PRINTK2("neigh %p is delayed.\n", neigh);
867 neigh_hold(neigh); 874 neigh_hold(neigh);
868 neigh->nud_state = NUD_DELAY; 875 neigh->nud_state = NUD_DELAY;
869 neigh->timer.expires = jiffies + neigh->parms->delay_probe_time; 876 neigh_add_timer(neigh,
870 add_timer(&neigh->timer); 877 jiffies + neigh->parms->delay_probe_time);
871 } 878 }
872 879
873 if (neigh->nud_state == NUD_INCOMPLETE) { 880 if (neigh->nud_state == NUD_INCOMPLETE) {
@@ -1013,10 +1020,10 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
1013 neigh_del_timer(neigh); 1020 neigh_del_timer(neigh);
1014 if (new & NUD_IN_TIMER) { 1021 if (new & NUD_IN_TIMER) {
1015 neigh_hold(neigh); 1022 neigh_hold(neigh);
1016 neigh->timer.expires = jiffies + 1023 neigh_add_timer(neigh, (jiffies +
1017 ((new & NUD_REACHABLE) ? 1024 ((new & NUD_REACHABLE) ?
1018 neigh->parms->reachable_time : 0); 1025 neigh->parms->reachable_time :
1019 add_timer(&neigh->timer); 1026 0)));
1020 } 1027 }
1021 neigh->nud_state = new; 1028 neigh->nud_state = new;
1022 } 1029 }
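
The neighbour.c hunks route every place that armed n->timer by hand (setting timer.expires and calling add_timer()) through the new neigh_add_timer(), which uses mod_timer(). Calling add_timer() on a timer that is already pending corrupts the timer list, while mod_timer() copes with that case and returns nonzero when it had to modify a pending timer, so the wrapper can turn a would-be double add into a loud warning instead. The same guard in isolation:

#include <linux/timer.h>
#include <linux/kernel.h>

/* mod_timer() returns nonzero when the timer was already pending; a
 * nonzero return here therefore means someone tried to arm the same
 * timer twice. */
static inline void arm_timer_once(struct timer_list *t, unsigned long when)
{
        if (unlikely(mod_timer(t, when)))
                printk(KERN_WARNING "timer armed while already pending\n");
}
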
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 5265dfd69928..802fe11efad0 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -703,7 +703,7 @@ int netpoll_setup(struct netpoll *np)
703 703
704 if (!np->local_ip) { 704 if (!np->local_ip) {
705 rcu_read_lock(); 705 rcu_read_lock();
706 in_dev = __in_dev_get(ndev); 706 in_dev = __in_dev_get_rcu(ndev);
707 707
708 if (!in_dev || !in_dev->ifa_list) { 708 if (!in_dev || !in_dev->ifa_list) {
709 rcu_read_unlock(); 709 rcu_read_unlock();
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index ef430b1e8e42..5f043d346694 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -186,7 +186,7 @@
186 186
187/* Used to help with determining the pkts on receive */ 187/* Used to help with determining the pkts on receive */
188#define PKTGEN_MAGIC 0xbe9be955 188#define PKTGEN_MAGIC 0xbe9be955
189#define PG_PROC_DIR "pktgen" 189#define PG_PROC_DIR "net/pktgen"
190 190
191#define MAX_CFLOWS 65536 191#define MAX_CFLOWS 65536
192 192
@@ -1476,18 +1476,7 @@ static int proc_thread_write(struct file *file, const char __user *user_buffer,
1476 1476
1477static int create_proc_dir(void) 1477static int create_proc_dir(void)
1478{ 1478{
1479 int len; 1479 pg_proc_dir = proc_mkdir(PG_PROC_DIR, NULL);
1480 /* does proc_dir already exists */
1481 len = strlen(PG_PROC_DIR);
1482
1483 for (pg_proc_dir = proc_net->subdir; pg_proc_dir; pg_proc_dir=pg_proc_dir->next) {
1484 if ((pg_proc_dir->namelen == len) &&
1485 (! memcmp(pg_proc_dir->name, PG_PROC_DIR, len)))
1486 break;
1487 }
1488
1489 if (!pg_proc_dir)
1490 pg_proc_dir = create_proc_entry(PG_PROC_DIR, S_IFDIR, proc_net);
1491 1480
1492 if (!pg_proc_dir) 1481 if (!pg_proc_dir)
1493 return -ENODEV; 1482 return -ENODEV;
@@ -1497,7 +1486,7 @@ static int create_proc_dir(void)
1497 1486
1498static int remove_proc_dir(void) 1487static int remove_proc_dir(void)
1499{ 1488{
1500 remove_proc_entry(PG_PROC_DIR, proc_net); 1489 remove_proc_entry(PG_PROC_DIR, NULL);
1501 return 0; 1490 return 0;
1502} 1491}
1503 1492
@@ -1678,13 +1667,12 @@ static void pktgen_setup_inject(struct pktgen_dev *pkt_dev)
1678 struct in_device *in_dev; 1667 struct in_device *in_dev;
1679 1668
1680 rcu_read_lock(); 1669 rcu_read_lock();
1681 in_dev = __in_dev_get(pkt_dev->odev); 1670 in_dev = __in_dev_get_rcu(pkt_dev->odev);
1682 if (in_dev) { 1671 if (in_dev) {
1683 if (in_dev->ifa_list) { 1672 if (in_dev->ifa_list) {
1684 pkt_dev->saddr_min = in_dev->ifa_list->ifa_address; 1673 pkt_dev->saddr_min = in_dev->ifa_list->ifa_address;
1685 pkt_dev->saddr_max = pkt_dev->saddr_min; 1674 pkt_dev->saddr_max = pkt_dev->saddr_min;
1686 } 1675 }
1687 __in_dev_put(in_dev);
1688 } 1676 }
1689 rcu_read_unlock(); 1677 rcu_read_unlock();
1690 } 1678 }
@@ -2908,7 +2896,7 @@ static int pktgen_add_device(struct pktgen_thread *t, const char* ifname)
2908 pkt_dev->udp_dst_max = 9; 2896 pkt_dev->udp_dst_max = 9;
2909 2897
2910 strncpy(pkt_dev->ifname, ifname, 31); 2898 strncpy(pkt_dev->ifname, ifname, 31);
2911 sprintf(pkt_dev->fname, "net/%s/%s", PG_PROC_DIR, ifname); 2899 sprintf(pkt_dev->fname, "%s/%s", PG_PROC_DIR, ifname);
2912 2900
2913 if (! pktgen_setup_dev(pkt_dev)) { 2901 if (! pktgen_setup_dev(pkt_dev)) {
2914 printk("pktgen: ERROR: pktgen_setup_dev failed.\n"); 2902 printk("pktgen: ERROR: pktgen_setup_dev failed.\n");
@@ -2981,7 +2969,7 @@ static int pktgen_create_thread(const char* name, int cpu)
2981 spin_lock_init(&t->if_lock); 2969 spin_lock_init(&t->if_lock);
2982 t->cpu = cpu; 2970 t->cpu = cpu;
2983 2971
2984 sprintf(t->fname, "net/%s/%s", PG_PROC_DIR, t->name); 2972 sprintf(t->fname, "%s/%s", PG_PROC_DIR, t->name);
2985 t->proc_ent = create_proc_entry(t->fname, 0600, NULL); 2973 t->proc_ent = create_proc_entry(t->fname, 0600, NULL);
2986 if (!t->proc_ent) { 2974 if (!t->proc_ent) {
2987 printk("pktgen: cannot create %s procfs entry.\n", t->fname); 2975 printk("pktgen: cannot create %s procfs entry.\n", t->fname);
@@ -3064,7 +3052,7 @@ static int __init pg_init(void)
3064 3052
3065 create_proc_dir(); 3053 create_proc_dir();
3066 3054
3067 sprintf(module_fname, "net/%s/pgctrl", PG_PROC_DIR); 3055 sprintf(module_fname, "%s/pgctrl", PG_PROC_DIR);
3068 module_proc_ent = create_proc_entry(module_fname, 0600, NULL); 3056 module_proc_ent = create_proc_entry(module_fname, 0600, NULL);
3069 if (!module_proc_ent) { 3057 if (!module_proc_ent) {
3070 printk("pktgen: ERROR: cannot create %s procfs entry.\n", module_fname); 3058 printk("pktgen: ERROR: cannot create %s procfs entry.\n", module_fname);
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index f80a28785610..0e9431b59fb2 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -71,8 +71,6 @@
71static kmem_cache_t *skbuff_head_cache __read_mostly; 71static kmem_cache_t *skbuff_head_cache __read_mostly;
72static kmem_cache_t *skbuff_fclone_cache __read_mostly; 72static kmem_cache_t *skbuff_fclone_cache __read_mostly;
73 73
74struct timeval __read_mostly skb_tv_base;
75
76/* 74/*
77 * Keep out-of-line to prevent kernel bloat. 75 * Keep out-of-line to prevent kernel bloat.
78 * __builtin_return_address is not used because it is not always 76 * __builtin_return_address is not used because it is not always
@@ -1708,8 +1706,6 @@ void __init skb_init(void)
1708 NULL, NULL); 1706 NULL, NULL);
1709 if (!skbuff_fclone_cache) 1707 if (!skbuff_fclone_cache)
1710 panic("cannot create skbuff cache"); 1708 panic("cannot create skbuff cache");
1711
1712 do_gettimeofday(&skb_tv_base);
1713} 1709}
1714 1710
1715EXPORT_SYMBOL(___pskb_trim); 1711EXPORT_SYMBOL(___pskb_trim);
@@ -1743,4 +1739,3 @@ EXPORT_SYMBOL(skb_prepare_seq_read);
1743EXPORT_SYMBOL(skb_seq_read); 1739EXPORT_SYMBOL(skb_seq_read);
1744EXPORT_SYMBOL(skb_abort_seq_read); 1740EXPORT_SYMBOL(skb_abort_seq_read);
1745EXPORT_SYMBOL(skb_find_text); 1741EXPORT_SYMBOL(skb_find_text);
1746EXPORT_SYMBOL(skb_tv_base);
diff --git a/net/core/sock.c b/net/core/sock.c
index ac63b56e23b2..928d2a1d6d8e 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -660,16 +660,20 @@ struct sock *sk_alloc(int family, unsigned int __nocast priority,
660 sock_lock_init(sk); 660 sock_lock_init(sk);
661 } 661 }
662 662
663 if (security_sk_alloc(sk, family, priority)) { 663 if (security_sk_alloc(sk, family, priority))
664 if (slab != NULL) 664 goto out_free;
665 kmem_cache_free(slab, sk); 665
666 else 666 if (!try_module_get(prot->owner))
667 kfree(sk); 667 goto out_free;
668 sk = NULL;
669 } else
670 __module_get(prot->owner);
671 } 668 }
672 return sk; 669 return sk;
670
671out_free:
672 if (slab != NULL)
673 kmem_cache_free(slab, sk);
674 else
675 kfree(sk);
676 return NULL;
673} 677}
674 678
675void sk_free(struct sock *sk) 679void sk_free(struct sock *sk)
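
The sk_alloc() hunk consolidates the failure handling behind a single out_free label and, more importantly, switches from __module_get() to try_module_get(): pinning the protocol module can fail (for example while the module is being unloaded), and that failure now unwinds the allocation like any other error. The shape of the pattern, with a placeholder standing in for the security_sk_alloc() hook and the slab/kmalloc choice simplified away:

int security_check_fails(struct sock *sk);      /* placeholder for security_sk_alloc() */

static struct sock *alloc_sock_sketch(struct proto *prot, unsigned int prio)
{
        struct sock *sk = kmalloc(prot->obj_size, prio);

        if (!sk)
                return NULL;
        if (security_check_fails(sk))
                goto out_free;
        if (!try_module_get(prot->owner))       /* may fail, unlike __module_get() */
                goto out_free;
        return sk;

out_free:
        kfree(sk);
        return NULL;
}
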
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 40fe6afacde6..ae088d1347af 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -62,27 +62,27 @@ static int __dccp_v4_check_established(struct sock *sk, const __u16 lport,
62 const int dif = sk->sk_bound_dev_if; 62 const int dif = sk->sk_bound_dev_if;
63 INET_ADDR_COOKIE(acookie, saddr, daddr) 63 INET_ADDR_COOKIE(acookie, saddr, daddr)
64 const __u32 ports = INET_COMBINED_PORTS(inet->dport, lport); 64 const __u32 ports = INET_COMBINED_PORTS(inet->dport, lport);
65 const int hash = inet_ehashfn(daddr, lport, saddr, inet->dport, 65 unsigned int hash = inet_ehashfn(daddr, lport, saddr, inet->dport);
66 dccp_hashinfo.ehash_size); 66 struct inet_ehash_bucket *head = inet_ehash_bucket(&dccp_hashinfo, hash);
67 struct inet_ehash_bucket *head = &dccp_hashinfo.ehash[hash];
68 const struct sock *sk2; 67 const struct sock *sk2;
69 const struct hlist_node *node; 68 const struct hlist_node *node;
70 struct inet_timewait_sock *tw; 69 struct inet_timewait_sock *tw;
71 70
71 prefetch(head->chain.first);
72 write_lock(&head->lock); 72 write_lock(&head->lock);
73 73
74 /* Check TIME-WAIT sockets first. */ 74 /* Check TIME-WAIT sockets first. */
75 sk_for_each(sk2, node, &(head + dccp_hashinfo.ehash_size)->chain) { 75 sk_for_each(sk2, node, &(head + dccp_hashinfo.ehash_size)->chain) {
76 tw = inet_twsk(sk2); 76 tw = inet_twsk(sk2);
77 77
78 if (INET_TW_MATCH(sk2, acookie, saddr, daddr, ports, dif)) 78 if (INET_TW_MATCH(sk2, hash, acookie, saddr, daddr, ports, dif))
79 goto not_unique; 79 goto not_unique;
80 } 80 }
81 tw = NULL; 81 tw = NULL;
82 82
83 /* And established part... */ 83 /* And established part... */
84 sk_for_each(sk2, node, &head->chain) { 84 sk_for_each(sk2, node, &head->chain) {
85 if (INET_MATCH(sk2, acookie, saddr, daddr, ports, dif)) 85 if (INET_MATCH(sk2, hash, acookie, saddr, daddr, ports, dif))
86 goto not_unique; 86 goto not_unique;
87 } 87 }
88 88
@@ -90,7 +90,7 @@ static int __dccp_v4_check_established(struct sock *sk, const __u16 lport,
90 * in hash table socket with a funny identity. */ 90 * in hash table socket with a funny identity. */
91 inet->num = lport; 91 inet->num = lport;
92 inet->sport = htons(lport); 92 inet->sport = htons(lport);
93 sk->sk_hashent = hash; 93 sk->sk_hash = hash;
94 BUG_TRAP(sk_unhashed(sk)); 94 BUG_TRAP(sk_unhashed(sk));
95 __sk_add_node(sk, &head->chain); 95 __sk_add_node(sk, &head->chain);
96 sock_prot_inc_use(sk->sk_prot); 96 sock_prot_inc_use(sk->sk_prot);
diff --git a/net/econet/af_econet.c b/net/econet/af_econet.c
index 4a62093eb343..34fdac51df96 100644
--- a/net/econet/af_econet.c
+++ b/net/econet/af_econet.c
@@ -406,7 +406,7 @@ static int econet_sendmsg(struct kiocb *iocb, struct socket *sock,
406 unsigned long network = 0; 406 unsigned long network = 0;
407 407
408 rcu_read_lock(); 408 rcu_read_lock();
409 idev = __in_dev_get(dev); 409 idev = __in_dev_get_rcu(dev);
410 if (idev) { 410 if (idev) {
411 if (idev->ifa_list) 411 if (idev->ifa_list)
412 network = ntohl(idev->ifa_list->ifa_address) & 412 network = ntohl(idev->ifa_list->ifa_address) &
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index 87a052a9a84f..68a5ca866442 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -146,6 +146,19 @@ int eth_rebuild_header(struct sk_buff *skb)
146 return 0; 146 return 0;
147} 147}
148 148
149static inline unsigned int compare_eth_addr(const unsigned char *__a, const unsigned char *__b)
150{
151 const unsigned short *dest = (unsigned short *) __a;
152 const unsigned short *devaddr = (unsigned short *) __b;
153 unsigned int res;
154
155 BUILD_BUG_ON(ETH_ALEN != 6);
156 res = ((dest[0] ^ devaddr[0]) |
157 (dest[1] ^ devaddr[1]) |
158 (dest[2] ^ devaddr[2])) != 0;
159
160 return res;
161}
149 162
150/* 163/*
151 * Determine the packet's protocol ID. The rule here is that we 164 * Determine the packet's protocol ID. The rule here is that we
@@ -158,16 +171,15 @@ __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev)
158 struct ethhdr *eth; 171 struct ethhdr *eth;
159 unsigned char *rawp; 172 unsigned char *rawp;
160 173
161 skb->mac.raw=skb->data; 174 skb->mac.raw = skb->data;
162 skb_pull(skb,ETH_HLEN); 175 skb_pull(skb,ETH_HLEN);
163 eth = eth_hdr(skb); 176 eth = eth_hdr(skb);
164 177
165 if(*eth->h_dest&1) 178 if (*eth->h_dest&1) {
166 { 179 if (!compare_eth_addr(eth->h_dest, dev->broadcast))
167 if(memcmp(eth->h_dest,dev->broadcast, ETH_ALEN)==0) 180 skb->pkt_type = PACKET_BROADCAST;
168 skb->pkt_type=PACKET_BROADCAST;
169 else 181 else
170 skb->pkt_type=PACKET_MULTICAST; 182 skb->pkt_type = PACKET_MULTICAST;
171 } 183 }
172 184
173 /* 185 /*
@@ -178,10 +190,9 @@ __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev)
178 * seems to set IFF_PROMISC. 190 * seems to set IFF_PROMISC.
179 */ 191 */
180 192
181 else if(1 /*dev->flags&IFF_PROMISC*/) 193 else if(1 /*dev->flags&IFF_PROMISC*/) {
182 { 194 if (unlikely(compare_eth_addr(eth->h_dest, dev->dev_addr)))
183 if(memcmp(eth->h_dest,dev->dev_addr, ETH_ALEN)) 195 skb->pkt_type = PACKET_OTHERHOST;
184 skb->pkt_type=PACKET_OTHERHOST;
185 } 196 }
186 197
187 if (ntohs(eth->h_proto) >= 1536) 198 if (ntohs(eth->h_proto) >= 1536)
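
compare_eth_addr() above folds the two 6-byte addresses as three 16-bit words and ORs the XORs, so the result is zero exactly when all bytes match; the kernel version can cast directly because it relies on the Ethernet header being at least 16-bit aligned. A userspace copy of the same idea, using memcpy so it does not assume alignment:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static int mac_differs(const unsigned char *a, const unsigned char *b)
{
        uint16_t wa[3], wb[3];

        memcpy(wa, a, 6);
        memcpy(wb, b, 6);
        /* zero only when every 16-bit word (hence every byte) matches */
        return ((wa[0] ^ wb[0]) | (wa[1] ^ wb[1]) | (wa[2] ^ wb[2])) != 0;
}

int main(void)
{
        const unsigned char x[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
        const unsigned char y[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x66 };

        printf("%d %d\n", mac_differs(x, x), mac_differs(x, y));   /* 0 1 */
        return 0;
}
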
diff --git a/net/ieee80211/ieee80211_module.c b/net/ieee80211/ieee80211_module.c
index 5714692e82b4..f66d792cd204 100644
--- a/net/ieee80211/ieee80211_module.c
+++ b/net/ieee80211/ieee80211_module.c
@@ -230,7 +230,7 @@ static int __init ieee80211_init(void)
230 struct proc_dir_entry *e; 230 struct proc_dir_entry *e;
231 231
232 ieee80211_debug_level = debug; 232 ieee80211_debug_level = debug;
233 ieee80211_proc = create_proc_entry(DRV_NAME, S_IFDIR, proc_net); 233 ieee80211_proc = proc_mkdir(DRV_NAME, proc_net);
234 if (ieee80211_proc == NULL) { 234 if (ieee80211_proc == NULL) {
235 IEEE80211_ERROR("Unable to create " DRV_NAME 235 IEEE80211_ERROR("Unable to create " DRV_NAME
236 " proc directory\n"); 236 " proc directory\n");
diff --git a/net/ieee80211/ieee80211_tx.c b/net/ieee80211/ieee80211_tx.c
index 8d87897d7eb7..e860777ab8d3 100644
--- a/net/ieee80211/ieee80211_tx.c
+++ b/net/ieee80211/ieee80211_tx.c
@@ -187,7 +187,7 @@ void ieee80211_txb_free(struct ieee80211_txb *txb)
187} 187}
188 188
189static struct ieee80211_txb *ieee80211_alloc_txb(int nr_frags, int txb_size, 189static struct ieee80211_txb *ieee80211_alloc_txb(int nr_frags, int txb_size,
190 int gfp_mask) 190 unsigned int gfp_mask)
191{ 191{
192 struct ieee80211_txb *txb; 192 struct ieee80211_txb *txb;
193 int i; 193 int i;
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index 8bf312bdea13..b425748f02d7 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -241,7 +241,7 @@ static int arp_constructor(struct neighbour *neigh)
241 neigh->type = inet_addr_type(addr); 241 neigh->type = inet_addr_type(addr);
242 242
243 rcu_read_lock(); 243 rcu_read_lock();
244 in_dev = rcu_dereference(__in_dev_get(dev)); 244 in_dev = __in_dev_get_rcu(dev);
245 if (in_dev == NULL) { 245 if (in_dev == NULL) {
246 rcu_read_unlock(); 246 rcu_read_unlock();
247 return -EINVAL; 247 return -EINVAL;
@@ -697,12 +697,6 @@ void arp_send(int type, int ptype, u32 dest_ip,
697 arp_xmit(skb); 697 arp_xmit(skb);
698} 698}
699 699
700static void parp_redo(struct sk_buff *skb)
701{
702 nf_reset(skb);
703 arp_rcv(skb, skb->dev, NULL, skb->dev);
704}
705
706/* 700/*
707 * Process an arp request. 701 * Process an arp request.
708 */ 702 */
@@ -922,6 +916,11 @@ out:
922 return 0; 916 return 0;
923} 917}
924 918
919static void parp_redo(struct sk_buff *skb)
920{
921 arp_process(skb);
922}
923
925 924
926/* 925/*
927 * Receive an arp request from the device layer. 926 * Receive an arp request from the device layer.
@@ -990,8 +989,8 @@ static int arp_req_set(struct arpreq *r, struct net_device * dev)
990 ipv4_devconf.proxy_arp = 1; 989 ipv4_devconf.proxy_arp = 1;
991 return 0; 990 return 0;
992 } 991 }
993 if (__in_dev_get(dev)) { 992 if (__in_dev_get_rtnl(dev)) {
994 __in_dev_get(dev)->cnf.proxy_arp = 1; 993 __in_dev_get_rtnl(dev)->cnf.proxy_arp = 1;
995 return 0; 994 return 0;
996 } 995 }
997 return -ENXIO; 996 return -ENXIO;
@@ -1096,8 +1095,8 @@ static int arp_req_delete(struct arpreq *r, struct net_device * dev)
1096 ipv4_devconf.proxy_arp = 0; 1095 ipv4_devconf.proxy_arp = 0;
1097 return 0; 1096 return 0;
1098 } 1097 }
1099 if (__in_dev_get(dev)) { 1098 if (__in_dev_get_rtnl(dev)) {
1100 __in_dev_get(dev)->cnf.proxy_arp = 0; 1099 __in_dev_get_rtnl(dev)->cnf.proxy_arp = 0;
1101 return 0; 1100 return 0;
1102 } 1101 }
1103 return -ENXIO; 1102 return -ENXIO;
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index ba2895ae8151..74f2207e131a 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -351,7 +351,7 @@ static int inet_insert_ifa(struct in_ifaddr *ifa)
351 351
352static int inet_set_ifa(struct net_device *dev, struct in_ifaddr *ifa) 352static int inet_set_ifa(struct net_device *dev, struct in_ifaddr *ifa)
353{ 353{
354 struct in_device *in_dev = __in_dev_get(dev); 354 struct in_device *in_dev = __in_dev_get_rtnl(dev);
355 355
356 ASSERT_RTNL(); 356 ASSERT_RTNL();
357 357
@@ -449,7 +449,7 @@ static int inet_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg
449 goto out; 449 goto out;
450 450
451 rc = -ENOBUFS; 451 rc = -ENOBUFS;
452 if ((in_dev = __in_dev_get(dev)) == NULL) { 452 if ((in_dev = __in_dev_get_rtnl(dev)) == NULL) {
453 in_dev = inetdev_init(dev); 453 in_dev = inetdev_init(dev);
454 if (!in_dev) 454 if (!in_dev)
455 goto out; 455 goto out;
@@ -584,7 +584,7 @@ int devinet_ioctl(unsigned int cmd, void __user *arg)
584 if (colon) 584 if (colon)
585 *colon = ':'; 585 *colon = ':';
586 586
587 if ((in_dev = __in_dev_get(dev)) != NULL) { 587 if ((in_dev = __in_dev_get_rtnl(dev)) != NULL) {
588 if (tryaddrmatch) { 588 if (tryaddrmatch) {
589 /* Matthias Andree */ 589 /* Matthias Andree */
590 /* compare label and address (4.4BSD style) */ 590 /* compare label and address (4.4BSD style) */
@@ -748,7 +748,7 @@ rarok:
748 748
749static int inet_gifconf(struct net_device *dev, char __user *buf, int len) 749static int inet_gifconf(struct net_device *dev, char __user *buf, int len)
750{ 750{
751 struct in_device *in_dev = __in_dev_get(dev); 751 struct in_device *in_dev = __in_dev_get_rtnl(dev);
752 struct in_ifaddr *ifa; 752 struct in_ifaddr *ifa;
753 struct ifreq ifr; 753 struct ifreq ifr;
754 int done = 0; 754 int done = 0;
@@ -791,7 +791,7 @@ u32 inet_select_addr(const struct net_device *dev, u32 dst, int scope)
791 struct in_device *in_dev; 791 struct in_device *in_dev;
792 792
793 rcu_read_lock(); 793 rcu_read_lock();
794 in_dev = __in_dev_get(dev); 794 in_dev = __in_dev_get_rcu(dev);
795 if (!in_dev) 795 if (!in_dev)
796 goto no_in_dev; 796 goto no_in_dev;
797 797
@@ -818,7 +818,7 @@ no_in_dev:
818 read_lock(&dev_base_lock); 818 read_lock(&dev_base_lock);
819 rcu_read_lock(); 819 rcu_read_lock();
820 for (dev = dev_base; dev; dev = dev->next) { 820 for (dev = dev_base; dev; dev = dev->next) {
821 if ((in_dev = __in_dev_get(dev)) == NULL) 821 if ((in_dev = __in_dev_get_rcu(dev)) == NULL)
822 continue; 822 continue;
823 823
824 for_primary_ifa(in_dev) { 824 for_primary_ifa(in_dev) {
@@ -887,7 +887,7 @@ u32 inet_confirm_addr(const struct net_device *dev, u32 dst, u32 local, int scop
887 887
888 if (dev) { 888 if (dev) {
889 rcu_read_lock(); 889 rcu_read_lock();
890 if ((in_dev = __in_dev_get(dev))) 890 if ((in_dev = __in_dev_get_rcu(dev)))
891 addr = confirm_addr_indev(in_dev, dst, local, scope); 891 addr = confirm_addr_indev(in_dev, dst, local, scope);
892 rcu_read_unlock(); 892 rcu_read_unlock();
893 893
@@ -897,7 +897,7 @@ u32 inet_confirm_addr(const struct net_device *dev, u32 dst, u32 local, int scop
897 read_lock(&dev_base_lock); 897 read_lock(&dev_base_lock);
898 rcu_read_lock(); 898 rcu_read_lock();
899 for (dev = dev_base; dev; dev = dev->next) { 899 for (dev = dev_base; dev; dev = dev->next) {
900 if ((in_dev = __in_dev_get(dev))) { 900 if ((in_dev = __in_dev_get_rcu(dev))) {
901 addr = confirm_addr_indev(in_dev, dst, local, scope); 901 addr = confirm_addr_indev(in_dev, dst, local, scope);
902 if (addr) 902 if (addr)
903 break; 903 break;
@@ -957,7 +957,7 @@ static int inetdev_event(struct notifier_block *this, unsigned long event,
957 void *ptr) 957 void *ptr)
958{ 958{
959 struct net_device *dev = ptr; 959 struct net_device *dev = ptr;
960 struct in_device *in_dev = __in_dev_get(dev); 960 struct in_device *in_dev = __in_dev_get_rtnl(dev);
961 961
962 ASSERT_RTNL(); 962 ASSERT_RTNL();
963 963
@@ -1078,7 +1078,7 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
1078 if (idx > s_idx) 1078 if (idx > s_idx)
1079 s_ip_idx = 0; 1079 s_ip_idx = 0;
1080 rcu_read_lock(); 1080 rcu_read_lock();
1081 if ((in_dev = __in_dev_get(dev)) == NULL) { 1081 if ((in_dev = __in_dev_get_rcu(dev)) == NULL) {
1082 rcu_read_unlock(); 1082 rcu_read_unlock();
1083 continue; 1083 continue;
1084 } 1084 }
@@ -1149,7 +1149,7 @@ void inet_forward_change(void)
1149 for (dev = dev_base; dev; dev = dev->next) { 1149 for (dev = dev_base; dev; dev = dev->next) {
1150 struct in_device *in_dev; 1150 struct in_device *in_dev;
1151 rcu_read_lock(); 1151 rcu_read_lock();
1152 in_dev = __in_dev_get(dev); 1152 in_dev = __in_dev_get_rcu(dev);
1153 if (in_dev) 1153 if (in_dev)
1154 in_dev->cnf.forwarding = on; 1154 in_dev->cnf.forwarding = on;
1155 rcu_read_unlock(); 1155 rcu_read_unlock();
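
From here through the fib, icmp and igmp hunks (and in the clip, netpoll, pktgen, arp and econet hunks earlier), the old __in_dev_get() accessor is split into two explicitly named variants: __in_dev_get_rcu() for callers inside rcu_read_lock(), and __in_dev_get_rtnl() for configuration paths already serialized by the RTNL, such as the notifier and ioctl code above. The RCU-side usage shape, mirroring those call sites (the helper name is illustrative):

#include <linux/netdevice.h>
#include <linux/inetdevice.h>

/* The in_device is only guaranteed to stay around while
 * rcu_read_lock() is held, so copy out what is needed before
 * unlocking. */
static u32 first_ifa_address(struct net_device *dev)
{
        struct in_device *in_dev;
        u32 addr = 0;

        rcu_read_lock();
        in_dev = __in_dev_get_rcu(dev);
        if (in_dev && in_dev->ifa_list)
                addr = in_dev->ifa_list->ifa_address;
        rcu_read_unlock();
        return addr;
}
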
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 4e1379f71269..e61bc7177eb1 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -173,7 +173,7 @@ int fib_validate_source(u32 src, u32 dst, u8 tos, int oif,
173 173
174 no_addr = rpf = 0; 174 no_addr = rpf = 0;
175 rcu_read_lock(); 175 rcu_read_lock();
176 in_dev = __in_dev_get(dev); 176 in_dev = __in_dev_get_rcu(dev);
177 if (in_dev) { 177 if (in_dev) {
178 no_addr = in_dev->ifa_list == NULL; 178 no_addr = in_dev->ifa_list == NULL;
179 rpf = IN_DEV_RPFILTER(in_dev); 179 rpf = IN_DEV_RPFILTER(in_dev);
@@ -607,7 +607,7 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
607static int fib_netdev_event(struct notifier_block *this, unsigned long event, void *ptr) 607static int fib_netdev_event(struct notifier_block *this, unsigned long event, void *ptr)
608{ 608{
609 struct net_device *dev = ptr; 609 struct net_device *dev = ptr;
610 struct in_device *in_dev = __in_dev_get(dev); 610 struct in_device *in_dev = __in_dev_get_rtnl(dev);
611 611
612 if (event == NETDEV_UNREGISTER) { 612 if (event == NETDEV_UNREGISTER) {
613 fib_disable_ip(dev, 2); 613 fib_disable_ip(dev, 2);
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index d41219e8037c..186f20c4a45e 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -1087,7 +1087,7 @@ fib_convert_rtentry(int cmd, struct nlmsghdr *nl, struct rtmsg *rtm,
1087 rta->rta_oif = &dev->ifindex; 1087 rta->rta_oif = &dev->ifindex;
1088 if (colon) { 1088 if (colon) {
1089 struct in_ifaddr *ifa; 1089 struct in_ifaddr *ifa;
1090 struct in_device *in_dev = __in_dev_get(dev); 1090 struct in_device *in_dev = __in_dev_get_rtnl(dev);
1091 if (!in_dev) 1091 if (!in_dev)
1092 return -ENODEV; 1092 return -ENODEV;
1093 *colon = ':'; 1093 *colon = ':';
@@ -1268,7 +1268,7 @@ int fib_sync_up(struct net_device *dev)
1268 } 1268 }
1269 if (nh->nh_dev == NULL || !(nh->nh_dev->flags&IFF_UP)) 1269 if (nh->nh_dev == NULL || !(nh->nh_dev->flags&IFF_UP))
1270 continue; 1270 continue;
1271 if (nh->nh_dev != dev || __in_dev_get(dev) == NULL) 1271 if (nh->nh_dev != dev || !__in_dev_get_rtnl(dev))
1272 continue; 1272 continue;
1273 alive++; 1273 alive++;
1274 spin_lock_bh(&fib_multipath_lock); 1274 spin_lock_bh(&fib_multipath_lock);
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 24eb56ae1b5a..90dca711ac9f 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -188,7 +188,7 @@ struct icmp_err icmp_err_convert[] = {
188 188
189/* Control parameters for ECHO replies. */ 189/* Control parameters for ECHO replies. */
190int sysctl_icmp_echo_ignore_all; 190int sysctl_icmp_echo_ignore_all;
191int sysctl_icmp_echo_ignore_broadcasts; 191int sysctl_icmp_echo_ignore_broadcasts = 1;
192 192
193/* Control parameter - ignore bogus broadcast responses? */ 193/* Control parameter - ignore bogus broadcast responses? */
194int sysctl_icmp_ignore_bogus_error_responses; 194int sysctl_icmp_ignore_bogus_error_responses;
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 70c44e4c3ceb..8b6d3939e1e6 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -1323,7 +1323,7 @@ static struct in_device * ip_mc_find_dev(struct ip_mreqn *imr)
1323 } 1323 }
1324 if (dev) { 1324 if (dev) {
1325 imr->imr_ifindex = dev->ifindex; 1325 imr->imr_ifindex = dev->ifindex;
1326 idev = __in_dev_get(dev); 1326 idev = __in_dev_get_rtnl(dev);
1327 } 1327 }
1328 return idev; 1328 return idev;
1329} 1329}
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
index 4d1502a49852..f9076ef3a1a8 100644
--- a/net/ipv4/inet_timewait_sock.c
+++ b/net/ipv4/inet_timewait_sock.c
@@ -20,7 +20,7 @@ void __inet_twsk_kill(struct inet_timewait_sock *tw, struct inet_hashinfo *hashi
20 struct inet_bind_hashbucket *bhead; 20 struct inet_bind_hashbucket *bhead;
21 struct inet_bind_bucket *tb; 21 struct inet_bind_bucket *tb;
22 /* Unlink from established hashes. */ 22 /* Unlink from established hashes. */
23 struct inet_ehash_bucket *ehead = &hashinfo->ehash[tw->tw_hashent]; 23 struct inet_ehash_bucket *ehead = inet_ehash_bucket(hashinfo, tw->tw_hash);
24 24
25 write_lock(&ehead->lock); 25 write_lock(&ehead->lock);
26 if (hlist_unhashed(&tw->tw_node)) { 26 if (hlist_unhashed(&tw->tw_node)) {
@@ -60,7 +60,7 @@ void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
60{ 60{
61 const struct inet_sock *inet = inet_sk(sk); 61 const struct inet_sock *inet = inet_sk(sk);
62 const struct inet_connection_sock *icsk = inet_csk(sk); 62 const struct inet_connection_sock *icsk = inet_csk(sk);
63 struct inet_ehash_bucket *ehead = &hashinfo->ehash[sk->sk_hashent]; 63 struct inet_ehash_bucket *ehead = inet_ehash_bucket(hashinfo, sk->sk_hash);
64 struct inet_bind_hashbucket *bhead; 64 struct inet_bind_hashbucket *bhead;
65 /* Step 1: Put TW into bind hash. Original socket stays there too. 65 /* Step 1: Put TW into bind hash. Original socket stays there too.
66 Note, that any socket with inet->num != 0 MUST be bound in 66 Note, that any socket with inet->num != 0 MUST be bound in
@@ -106,7 +106,7 @@ struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk, const int stat
106 tw->tw_dport = inet->dport; 106 tw->tw_dport = inet->dport;
107 tw->tw_family = sk->sk_family; 107 tw->tw_family = sk->sk_family;
108 tw->tw_reuse = sk->sk_reuse; 108 tw->tw_reuse = sk->sk_reuse;
109 tw->tw_hashent = sk->sk_hashent; 109 tw->tw_hash = sk->sk_hash;
110 tw->tw_ipv6only = 0; 110 tw->tw_ipv6only = 0;
111 tw->tw_prot = sk->sk_prot_creator; 111 tw->tw_prot = sk->sk_prot_creator;
112 atomic_set(&tw->tw_refcnt, 1); 112 atomic_set(&tw->tw_refcnt, 1);
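
Note: the timewait hunks above stop caching a pre-masked bucket index (tw_hashent/sk_hashent) and instead store the full hash in tw_hash/sk_hash, resolving it to a chain through inet_ehash_bucket(). A sketch of what such a bucket lookup amounts to, assuming ehash_size is a power of two; this is an illustration, not the header's actual definition:

	/* Illustrative only: map a full hash value onto an ehash chain. */
	static inline struct inet_ehash_bucket *
	example_ehash_bucket(struct inet_hashinfo *hashinfo, unsigned int hash)
	{
		return &hashinfo->ehash[hash & (hashinfo->ehash_size - 1)];
	}
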
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index f0d5740d7e22..896ce3f8f53a 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -1104,10 +1104,10 @@ static int ipgre_open(struct net_device *dev)
1104 return -EADDRNOTAVAIL; 1104 return -EADDRNOTAVAIL;
1105 dev = rt->u.dst.dev; 1105 dev = rt->u.dst.dev;
1106 ip_rt_put(rt); 1106 ip_rt_put(rt);
1107 if (__in_dev_get(dev) == NULL) 1107 if (__in_dev_get_rtnl(dev) == NULL)
1108 return -EADDRNOTAVAIL; 1108 return -EADDRNOTAVAIL;
1109 t->mlink = dev->ifindex; 1109 t->mlink = dev->ifindex;
1110 ip_mc_inc_group(__in_dev_get(dev), t->parms.iph.daddr); 1110 ip_mc_inc_group(__in_dev_get_rtnl(dev), t->parms.iph.daddr);
1111 } 1111 }
1112 return 0; 1112 return 0;
1113} 1113}
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 9dbf5909f3a6..302b7eb507c9 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -149,7 +149,7 @@ struct net_device *ipmr_new_tunnel(struct vifctl *v)
149 if (err == 0 && (dev = __dev_get_by_name(p.name)) != NULL) { 149 if (err == 0 && (dev = __dev_get_by_name(p.name)) != NULL) {
150 dev->flags |= IFF_MULTICAST; 150 dev->flags |= IFF_MULTICAST;
151 151
152 in_dev = __in_dev_get(dev); 152 in_dev = __in_dev_get_rtnl(dev);
153 if (in_dev == NULL && (in_dev = inetdev_init(dev)) == NULL) 153 if (in_dev == NULL && (in_dev = inetdev_init(dev)) == NULL)
154 goto failure; 154 goto failure;
155 in_dev->cnf.rp_filter = 0; 155 in_dev->cnf.rp_filter = 0;
@@ -278,7 +278,7 @@ static int vif_delete(int vifi)
278 278
279 dev_set_allmulti(dev, -1); 279 dev_set_allmulti(dev, -1);
280 280
281 if ((in_dev = __in_dev_get(dev)) != NULL) { 281 if ((in_dev = __in_dev_get_rtnl(dev)) != NULL) {
282 in_dev->cnf.mc_forwarding--; 282 in_dev->cnf.mc_forwarding--;
283 ip_rt_multicast_event(in_dev); 283 ip_rt_multicast_event(in_dev);
284 } 284 }
@@ -421,7 +421,7 @@ static int vif_add(struct vifctl *vifc, int mrtsock)
421 return -EINVAL; 421 return -EINVAL;
422 } 422 }
423 423
424 if ((in_dev = __in_dev_get(dev)) == NULL) 424 if ((in_dev = __in_dev_get_rtnl(dev)) == NULL)
425 return -EADDRNOTAVAIL; 425 return -EADDRNOTAVAIL;
426 in_dev->cnf.mc_forwarding++; 426 in_dev->cnf.mc_forwarding++;
427 dev_set_allmulti(dev, +1); 427 dev_set_allmulti(dev, +1);
diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig
index 3cf9b451675c..2cd7e7d1ac90 100644
--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
@@ -537,6 +537,17 @@ config IP_NF_TARGET_TCPMSS
537 537
538 To compile it as a module, choose M here. If unsure, say N. 538 To compile it as a module, choose M here. If unsure, say N.
539 539
540config IP_NF_TARGET_NFQUEUE
541 tristate "NFQUEUE Target Support"
542 depends on IP_NF_IPTABLES
543 help
544 This Target replaced the old obsolete QUEUE target.
545
546 As opposed to QUEUE, it supports 65535 different queues,
547 not just one.
548
549 To compile it as a module, choose M here. If unsure, say N.
550
540# NAT + specific targets 551# NAT + specific targets
541config IP_NF_NAT 552config IP_NF_NAT
542 tristate "Full NAT" 553 tristate "Full NAT"
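
Note: the NFQUEUE target added above queues matching packets to userspace over nfnetlink_queue, addressed by a 16-bit queue number rather than QUEUE's single queue. A minimal userspace sketch of consuming such a queue, assuming the libnetfilter_queue library API of the period (the library is not part of this patch, and error handling is omitted):

	#include <stdlib.h>
	#include <unistd.h>
	#include <sys/types.h>
	#include <sys/socket.h>
	#include <arpa/inet.h>
	#include <linux/netfilter.h>            /* NF_ACCEPT */
	#include <libnetfilter_queue/libnetfilter_queue.h>

	/* Accept everything sent to queue 0 (e.g. iptables ... -j NFQUEUE --queue-num 0). */
	static int cb(struct nfq_q_handle *qh, struct nfgenmsg *nfmsg,
		      struct nfq_data *nfa, void *data)
	{
		struct nfqnl_msg_packet_hdr *ph = nfq_get_msg_packet_hdr(nfa);
		u_int32_t id = ph ? ntohl(ph->packet_id) : 0;

		return nfq_set_verdict(qh, id, NF_ACCEPT, 0, NULL);
	}

	int main(void)
	{
		struct nfq_handle *h = nfq_open();
		struct nfq_q_handle *qh;
		char buf[4096];
		int fd, rv;

		nfq_bind_pf(h, AF_INET);
		qh = nfq_create_queue(h, 0, &cb, NULL);
		nfq_set_mode(qh, NFQNL_COPY_PACKET, 0xffff);
		fd = nfq_fd(h);
		while ((rv = recv(fd, buf, sizeof(buf), 0)) >= 0)
			nfq_handle_packet(h, buf, rv);
		nfq_destroy_queue(qh);
		nfq_close(h);
		return 0;
	}
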
diff --git a/net/ipv4/netfilter/Makefile b/net/ipv4/netfilter/Makefile
index 3d45d3c0283c..dab4b58dd31e 100644
--- a/net/ipv4/netfilter/Makefile
+++ b/net/ipv4/netfilter/Makefile
@@ -4,7 +4,8 @@
4 4
5# objects for the standalone - connection tracking / NAT 5# objects for the standalone - connection tracking / NAT
6ip_conntrack-objs := ip_conntrack_standalone.o ip_conntrack_core.o ip_conntrack_proto_generic.o ip_conntrack_proto_tcp.o ip_conntrack_proto_udp.o ip_conntrack_proto_icmp.o 6ip_conntrack-objs := ip_conntrack_standalone.o ip_conntrack_core.o ip_conntrack_proto_generic.o ip_conntrack_proto_tcp.o ip_conntrack_proto_udp.o ip_conntrack_proto_icmp.o
7iptable_nat-objs := ip_nat_standalone.o ip_nat_rule.o ip_nat_core.o ip_nat_helper.o ip_nat_proto_unknown.o ip_nat_proto_tcp.o ip_nat_proto_udp.o ip_nat_proto_icmp.o 7ip_nat-objs := ip_nat_core.o ip_nat_helper.o ip_nat_proto_unknown.o ip_nat_proto_tcp.o ip_nat_proto_udp.o ip_nat_proto_icmp.o
8iptable_nat-objs := ip_nat_rule.o ip_nat_standalone.o
8 9
9ip_conntrack_pptp-objs := ip_conntrack_helper_pptp.o ip_conntrack_proto_gre.o 10ip_conntrack_pptp-objs := ip_conntrack_helper_pptp.o ip_conntrack_proto_gre.o
10ip_nat_pptp-objs := ip_nat_helper_pptp.o ip_nat_proto_gre.o 11ip_nat_pptp-objs := ip_nat_helper_pptp.o ip_nat_proto_gre.o
@@ -40,7 +41,7 @@ obj-$(CONFIG_IP_NF_IPTABLES) += ip_tables.o
40# the three instances of ip_tables 41# the three instances of ip_tables
41obj-$(CONFIG_IP_NF_FILTER) += iptable_filter.o 42obj-$(CONFIG_IP_NF_FILTER) += iptable_filter.o
42obj-$(CONFIG_IP_NF_MANGLE) += iptable_mangle.o 43obj-$(CONFIG_IP_NF_MANGLE) += iptable_mangle.o
43obj-$(CONFIG_IP_NF_NAT) += iptable_nat.o 44obj-$(CONFIG_IP_NF_NAT) += iptable_nat.o ip_nat.o
44obj-$(CONFIG_IP_NF_RAW) += iptable_raw.o 45obj-$(CONFIG_IP_NF_RAW) += iptable_raw.o
45 46
46# matches 47# matches
@@ -92,6 +93,7 @@ obj-$(CONFIG_IP_NF_TARGET_TCPMSS) += ipt_TCPMSS.o
92obj-$(CONFIG_IP_NF_TARGET_NOTRACK) += ipt_NOTRACK.o 93obj-$(CONFIG_IP_NF_TARGET_NOTRACK) += ipt_NOTRACK.o
93obj-$(CONFIG_IP_NF_TARGET_CLUSTERIP) += ipt_CLUSTERIP.o 94obj-$(CONFIG_IP_NF_TARGET_CLUSTERIP) += ipt_CLUSTERIP.o
94obj-$(CONFIG_IP_NF_TARGET_TTL) += ipt_TTL.o 95obj-$(CONFIG_IP_NF_TARGET_TTL) += ipt_TTL.o
96obj-$(CONFIG_IP_NF_TARGET_NFQUEUE) += ipt_NFQUEUE.o
95 97
96# generic ARP tables 98# generic ARP tables
97obj-$(CONFIG_IP_NF_ARPTABLES) += arp_tables.o 99obj-$(CONFIG_IP_NF_ARPTABLES) += arp_tables.o
@@ -101,4 +103,3 @@ obj-$(CONFIG_IP_NF_ARP_MANGLE) += arpt_mangle.o
101obj-$(CONFIG_IP_NF_ARPFILTER) += arptable_filter.o 103obj-$(CONFIG_IP_NF_ARPFILTER) += arptable_filter.o
102 104
103obj-$(CONFIG_IP_NF_QUEUE) += ip_queue.o 105obj-$(CONFIG_IP_NF_QUEUE) += ip_queue.o
104obj-$(CONFIG_NETFILTER_NETLINK_QUEUE) += ipt_NFQUEUE.o
diff --git a/net/ipv4/netfilter/ip_conntrack_amanda.c b/net/ipv4/netfilter/ip_conntrack_amanda.c
index dc20881004bc..fa3f914117ec 100644
--- a/net/ipv4/netfilter/ip_conntrack_amanda.c
+++ b/net/ipv4/netfilter/ip_conntrack_amanda.c
@@ -65,7 +65,7 @@ static int help(struct sk_buff **pskb,
65 65
66 /* increase the UDP timeout of the master connection as replies from 66 /* increase the UDP timeout of the master connection as replies from
67 * Amanda clients to the server can be quite delayed */ 67 * Amanda clients to the server can be quite delayed */
68 ip_ct_refresh_acct(ct, ctinfo, NULL, master_timeout * HZ); 68 ip_ct_refresh(ct, *pskb, master_timeout * HZ);
69 69
70 /* No data? */ 70 /* No data? */
71 dataoff = (*pskb)->nh.iph->ihl*4 + sizeof(struct udphdr); 71 dataoff = (*pskb)->nh.iph->ihl*4 + sizeof(struct udphdr);
diff --git a/net/ipv4/netfilter/ip_conntrack_core.c b/net/ipv4/netfilter/ip_conntrack_core.c
index c1f82e0c81cf..ea65dd3e517a 100644
--- a/net/ipv4/netfilter/ip_conntrack_core.c
+++ b/net/ipv4/netfilter/ip_conntrack_core.c
@@ -1112,45 +1112,46 @@ void ip_conntrack_helper_unregister(struct ip_conntrack_helper *me)
1112 synchronize_net(); 1112 synchronize_net();
1113} 1113}
1114 1114
1115static inline void ct_add_counters(struct ip_conntrack *ct, 1115/* Refresh conntrack for this many jiffies and do accounting if do_acct is 1 */
1116 enum ip_conntrack_info ctinfo, 1116void __ip_ct_refresh_acct(struct ip_conntrack *ct,
1117 const struct sk_buff *skb)
1118{
1119#ifdef CONFIG_IP_NF_CT_ACCT
1120 if (skb) {
1121 ct->counters[CTINFO2DIR(ctinfo)].packets++;
1122 ct->counters[CTINFO2DIR(ctinfo)].bytes +=
1123 ntohs(skb->nh.iph->tot_len);
1124 }
1125#endif
1126}
1127
1128/* Refresh conntrack for this many jiffies and do accounting (if skb != NULL) */
1129void ip_ct_refresh_acct(struct ip_conntrack *ct,
1130 enum ip_conntrack_info ctinfo, 1117 enum ip_conntrack_info ctinfo,
1131 const struct sk_buff *skb, 1118 const struct sk_buff *skb,
1132 unsigned long extra_jiffies) 1119 unsigned long extra_jiffies,
1120 int do_acct)
1133{ 1121{
1122 int do_event = 0;
1123
1134 IP_NF_ASSERT(ct->timeout.data == (unsigned long)ct); 1124 IP_NF_ASSERT(ct->timeout.data == (unsigned long)ct);
1125 IP_NF_ASSERT(skb);
1126
1127 write_lock_bh(&ip_conntrack_lock);
1135 1128
1136 /* If not in hash table, timer will not be active yet */ 1129 /* If not in hash table, timer will not be active yet */
1137 if (!is_confirmed(ct)) { 1130 if (!is_confirmed(ct)) {
1138 ct->timeout.expires = extra_jiffies; 1131 ct->timeout.expires = extra_jiffies;
1139 ct_add_counters(ct, ctinfo, skb); 1132 do_event = 1;
1140 } else { 1133 } else {
1141 write_lock_bh(&ip_conntrack_lock);
1142 /* Need del_timer for race avoidance (may already be dying). */ 1134 /* Need del_timer for race avoidance (may already be dying). */
1143 if (del_timer(&ct->timeout)) { 1135 if (del_timer(&ct->timeout)) {
1144 ct->timeout.expires = jiffies + extra_jiffies; 1136 ct->timeout.expires = jiffies + extra_jiffies;
1145 add_timer(&ct->timeout); 1137 add_timer(&ct->timeout);
1146 /* FIXME: We loose some REFRESH events if this function 1138 do_event = 1;
1147 * is called without an skb. I'll fix this later -HW */
1148 if (skb)
1149 ip_conntrack_event_cache(IPCT_REFRESH, skb);
1150 } 1139 }
1151 ct_add_counters(ct, ctinfo, skb);
1152 write_unlock_bh(&ip_conntrack_lock);
1153 } 1140 }
1141
1142#ifdef CONFIG_IP_NF_CT_ACCT
1143 if (do_acct) {
1144 ct->counters[CTINFO2DIR(ctinfo)].packets++;
1145 ct->counters[CTINFO2DIR(ctinfo)].bytes +=
1146 ntohs(skb->nh.iph->tot_len);
1147 }
1148#endif
1149
1150 write_unlock_bh(&ip_conntrack_lock);
1151
1152 /* must be unlocked when calling event cache */
1153 if (do_event)
1154 ip_conntrack_event_cache(IPCT_REFRESH, skb);
1154} 1155}
1155 1156
1156#if defined(CONFIG_IP_NF_CONNTRACK_NETLINK) || \ 1157#if defined(CONFIG_IP_NF_CONNTRACK_NETLINK) || \
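
Note: the refactor above folds the accounting into a single __ip_ct_refresh_acct() that takes a do_acct flag, always requires an skb, and defers the IPCT_REFRESH event until the conntrack lock has been dropped. The callers switched to ip_ct_refresh()/ip_ct_refresh_acct() presumably reach it through thin inline wrappers in the conntrack header, which this diff does not show; a sketch of what such wrappers would look like:

	/* Sketch only -- the real wrappers live in the ip_conntrack header. */
	static inline void ip_ct_refresh_acct(struct ip_conntrack *ct,
					      enum ip_conntrack_info ctinfo,
					      const struct sk_buff *skb,
					      unsigned long extra_jiffies)
	{
		__ip_ct_refresh_acct(ct, ctinfo, skb, extra_jiffies, 1);
	}

	static inline void ip_ct_refresh(struct ip_conntrack *ct,
					 const struct sk_buff *skb,
					 unsigned long extra_jiffies)
	{
		__ip_ct_refresh_acct(ct, 0, skb, extra_jiffies, 0);
	}
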
diff --git a/net/ipv4/netfilter/ip_conntrack_helper_pptp.c b/net/ipv4/netfilter/ip_conntrack_helper_pptp.c
index 79db5b70d5f6..926a6684643d 100644
--- a/net/ipv4/netfilter/ip_conntrack_helper_pptp.c
+++ b/net/ipv4/netfilter/ip_conntrack_helper_pptp.c
@@ -172,7 +172,6 @@ static int destroy_sibling_or_exp(const struct ip_conntrack_tuple *t)
172 DEBUGP("setting timeout of conntrack %p to 0\n", sibling); 172 DEBUGP("setting timeout of conntrack %p to 0\n", sibling);
173 sibling->proto.gre.timeout = 0; 173 sibling->proto.gre.timeout = 0;
174 sibling->proto.gre.stream_timeout = 0; 174 sibling->proto.gre.stream_timeout = 0;
175 /* refresh_acct will not modify counters if skb == NULL */
176 if (del_timer(&sibling->timeout)) 175 if (del_timer(&sibling->timeout))
177 sibling->timeout.function((unsigned long)sibling); 176 sibling->timeout.function((unsigned long)sibling);
178 ip_conntrack_put(sibling); 177 ip_conntrack_put(sibling);
@@ -223,8 +222,8 @@ static void pptp_destroy_siblings(struct ip_conntrack *ct)
223static inline int 222static inline int
224exp_gre(struct ip_conntrack *master, 223exp_gre(struct ip_conntrack *master,
225 u_int32_t seq, 224 u_int32_t seq,
226 u_int16_t callid, 225 __be16 callid,
227 u_int16_t peer_callid) 226 __be16 peer_callid)
228{ 227{
229 struct ip_conntrack_tuple inv_tuple; 228 struct ip_conntrack_tuple inv_tuple;
230 struct ip_conntrack_tuple exp_tuples[] = { 229 struct ip_conntrack_tuple exp_tuples[] = {
@@ -263,7 +262,7 @@ exp_gre(struct ip_conntrack *master,
263 exp_orig->mask.src.ip = 0xffffffff; 262 exp_orig->mask.src.ip = 0xffffffff;
264 exp_orig->mask.src.u.all = 0; 263 exp_orig->mask.src.u.all = 0;
265 exp_orig->mask.dst.u.all = 0; 264 exp_orig->mask.dst.u.all = 0;
266 exp_orig->mask.dst.u.gre.key = 0xffff; 265 exp_orig->mask.dst.u.gre.key = htons(0xffff);
267 exp_orig->mask.dst.ip = 0xffffffff; 266 exp_orig->mask.dst.ip = 0xffffffff;
268 exp_orig->mask.dst.protonum = 0xff; 267 exp_orig->mask.dst.protonum = 0xff;
269 268
@@ -340,7 +339,8 @@ pptp_inbound_pkt(struct sk_buff **pskb,
340 unsigned int reqlen; 339 unsigned int reqlen;
341 union pptp_ctrl_union _pptpReq, *pptpReq; 340 union pptp_ctrl_union _pptpReq, *pptpReq;
342 struct ip_ct_pptp_master *info = &ct->help.ct_pptp_info; 341 struct ip_ct_pptp_master *info = &ct->help.ct_pptp_info;
343 u_int16_t msg, *cid, *pcid; 342 u_int16_t msg;
343 __be16 *cid, *pcid;
344 u_int32_t seq; 344 u_int32_t seq;
345 345
346 ctlh = skb_header_pointer(*pskb, nexthdr_off, sizeof(_ctlh), &_ctlh); 346 ctlh = skb_header_pointer(*pskb, nexthdr_off, sizeof(_ctlh), &_ctlh);
@@ -485,7 +485,7 @@ pptp_inbound_pkt(struct sk_buff **pskb,
485 485
486 if (info->pns_call_id != ntohs(*pcid)) { 486 if (info->pns_call_id != ntohs(*pcid)) {
487 DEBUGP("%s for unknown CallID %u\n", 487 DEBUGP("%s for unknown CallID %u\n",
488 pptp_msg_name[msg], ntohs(*cid)); 488 pptp_msg_name[msg], ntohs(*pcid));
489 break; 489 break;
490 } 490 }
491 491
@@ -551,7 +551,8 @@ pptp_outbound_pkt(struct sk_buff **pskb,
551 unsigned int reqlen; 551 unsigned int reqlen;
552 union pptp_ctrl_union _pptpReq, *pptpReq; 552 union pptp_ctrl_union _pptpReq, *pptpReq;
553 struct ip_ct_pptp_master *info = &ct->help.ct_pptp_info; 553 struct ip_ct_pptp_master *info = &ct->help.ct_pptp_info;
554 u_int16_t msg, *cid, *pcid; 554 u_int16_t msg;
555 __be16 *cid, *pcid;
555 556
556 ctlh = skb_header_pointer(*pskb, nexthdr_off, sizeof(_ctlh), &_ctlh); 557 ctlh = skb_header_pointer(*pskb, nexthdr_off, sizeof(_ctlh), &_ctlh);
557 if (!ctlh) 558 if (!ctlh)
@@ -755,7 +756,7 @@ static struct ip_conntrack_helper pptp = {
755 } 756 }
756 }, 757 },
757 .mask = { .src = { .ip = 0, 758 .mask = { .src = { .ip = 0,
758 .u = { .tcp = { .port = 0xffff } } 759 .u = { .tcp = { .port = __constant_htons(0xffff) } }
759 }, 760 },
760 .dst = { .ip = 0, 761 .dst = { .ip = 0,
761 .u = { .all = 0 }, 762 .u = { .all = 0 },
diff --git a/net/ipv4/netfilter/ip_conntrack_netbios_ns.c b/net/ipv4/netfilter/ip_conntrack_netbios_ns.c
index 71ef19d126d0..186646eb249f 100644
--- a/net/ipv4/netfilter/ip_conntrack_netbios_ns.c
+++ b/net/ipv4/netfilter/ip_conntrack_netbios_ns.c
@@ -58,7 +58,7 @@ static int help(struct sk_buff **pskb,
58 goto out; 58 goto out;
59 59
60 rcu_read_lock(); 60 rcu_read_lock();
61 in_dev = __in_dev_get(rt->u.dst.dev); 61 in_dev = __in_dev_get_rcu(rt->u.dst.dev);
62 if (in_dev != NULL) { 62 if (in_dev != NULL) {
63 for_primary_ifa(in_dev) { 63 for_primary_ifa(in_dev) {
64 if (ifa->ifa_broadcast == iph->daddr) { 64 if (ifa->ifa_broadcast == iph->daddr) {
@@ -91,7 +91,7 @@ static int help(struct sk_buff **pskb,
91 ip_conntrack_expect_related(exp); 91 ip_conntrack_expect_related(exp);
92 ip_conntrack_expect_put(exp); 92 ip_conntrack_expect_put(exp);
93 93
94 ip_ct_refresh_acct(ct, ctinfo, NULL, timeout * HZ); 94 ip_ct_refresh(ct, *pskb, timeout * HZ);
95out: 95out:
96 return NF_ACCEPT; 96 return NF_ACCEPT;
97} 97}
diff --git a/net/ipv4/netfilter/ip_conntrack_proto_gre.c b/net/ipv4/netfilter/ip_conntrack_proto_gre.c
index de3cb9db6f85..744abb9d377a 100644
--- a/net/ipv4/netfilter/ip_conntrack_proto_gre.c
+++ b/net/ipv4/netfilter/ip_conntrack_proto_gre.c
@@ -247,6 +247,7 @@ static int gre_packet(struct ip_conntrack *ct,
247 ct->proto.gre.stream_timeout); 247 ct->proto.gre.stream_timeout);
248 /* Also, more likely to be important, and not a probe. */ 248 /* Also, more likely to be important, and not a probe. */
249 set_bit(IPS_ASSURED_BIT, &ct->status); 249 set_bit(IPS_ASSURED_BIT, &ct->status);
250 ip_conntrack_event_cache(IPCT_STATUS, skb);
250 } else 251 } else
251 ip_ct_refresh_acct(ct, conntrackinfo, skb, 252 ip_ct_refresh_acct(ct, conntrackinfo, skb,
252 ct->proto.gre.timeout); 253 ct->proto.gre.timeout);
diff --git a/net/ipv4/netfilter/ip_conntrack_proto_sctp.c b/net/ipv4/netfilter/ip_conntrack_proto_sctp.c
index a875f35e576d..59a4a0111dd3 100644
--- a/net/ipv4/netfilter/ip_conntrack_proto_sctp.c
+++ b/net/ipv4/netfilter/ip_conntrack_proto_sctp.c
@@ -416,6 +416,7 @@ static int sctp_packet(struct ip_conntrack *conntrack,
416 && newconntrack == SCTP_CONNTRACK_ESTABLISHED) { 416 && newconntrack == SCTP_CONNTRACK_ESTABLISHED) {
417 DEBUGP("Setting assured bit\n"); 417 DEBUGP("Setting assured bit\n");
418 set_bit(IPS_ASSURED_BIT, &conntrack->status); 418 set_bit(IPS_ASSURED_BIT, &conntrack->status);
419 ip_conntrack_event_cache(IPCT_STATUS, skb);
419 } 420 }
420 421
421 return NF_ACCEPT; 422 return NF_ACCEPT;
diff --git a/net/ipv4/netfilter/ip_conntrack_proto_tcp.c b/net/ipv4/netfilter/ip_conntrack_proto_tcp.c
index 1985abc59d24..121760d6cc50 100644
--- a/net/ipv4/netfilter/ip_conntrack_proto_tcp.c
+++ b/net/ipv4/netfilter/ip_conntrack_proto_tcp.c
@@ -1014,7 +1014,8 @@ static int tcp_packet(struct ip_conntrack *conntrack,
1014 /* Set ASSURED if we see see valid ack in ESTABLISHED 1014 /* Set ASSURED if we see see valid ack in ESTABLISHED
1015 after SYN_RECV or a valid answer for a picked up 1015 after SYN_RECV or a valid answer for a picked up
1016 connection. */ 1016 connection. */
1017 set_bit(IPS_ASSURED_BIT, &conntrack->status); 1017 set_bit(IPS_ASSURED_BIT, &conntrack->status);
1018 ip_conntrack_event_cache(IPCT_STATUS, skb);
1018 } 1019 }
1019 ip_ct_refresh_acct(conntrack, ctinfo, skb, timeout); 1020 ip_ct_refresh_acct(conntrack, ctinfo, skb, timeout);
1020 1021
diff --git a/net/ipv4/netfilter/ip_conntrack_standalone.c b/net/ipv4/netfilter/ip_conntrack_standalone.c
index d3c7808010ec..dd476b191f4b 100644
--- a/net/ipv4/netfilter/ip_conntrack_standalone.c
+++ b/net/ipv4/netfilter/ip_conntrack_standalone.c
@@ -989,7 +989,7 @@ EXPORT_SYMBOL(need_ip_conntrack);
989EXPORT_SYMBOL(ip_conntrack_helper_register); 989EXPORT_SYMBOL(ip_conntrack_helper_register);
990EXPORT_SYMBOL(ip_conntrack_helper_unregister); 990EXPORT_SYMBOL(ip_conntrack_helper_unregister);
991EXPORT_SYMBOL(ip_ct_iterate_cleanup); 991EXPORT_SYMBOL(ip_ct_iterate_cleanup);
992EXPORT_SYMBOL(ip_ct_refresh_acct); 992EXPORT_SYMBOL(__ip_ct_refresh_acct);
993 993
994EXPORT_SYMBOL(ip_conntrack_expect_alloc); 994EXPORT_SYMBOL(ip_conntrack_expect_alloc);
995EXPORT_SYMBOL(ip_conntrack_expect_put); 995EXPORT_SYMBOL(ip_conntrack_expect_put);
diff --git a/net/ipv4/netfilter/ip_nat_core.c b/net/ipv4/netfilter/ip_nat_core.c
index c3ea891d38e7..c5e3abd24672 100644
--- a/net/ipv4/netfilter/ip_nat_core.c
+++ b/net/ipv4/netfilter/ip_nat_core.c
@@ -74,12 +74,14 @@ ip_nat_proto_find_get(u_int8_t protonum)
74 74
75 return p; 75 return p;
76} 76}
77EXPORT_SYMBOL_GPL(ip_nat_proto_find_get);
77 78
78void 79void
79ip_nat_proto_put(struct ip_nat_protocol *p) 80ip_nat_proto_put(struct ip_nat_protocol *p)
80{ 81{
81 module_put(p->me); 82 module_put(p->me);
82} 83}
84EXPORT_SYMBOL_GPL(ip_nat_proto_put);
83 85
84/* We keep an extra hash for each conntrack, for fast searching. */ 86/* We keep an extra hash for each conntrack, for fast searching. */
85static inline unsigned int 87static inline unsigned int
@@ -111,6 +113,7 @@ ip_nat_cheat_check(u_int32_t oldvalinv, u_int32_t newval, u_int16_t oldcheck)
111 return csum_fold(csum_partial((char *)diffs, sizeof(diffs), 113 return csum_fold(csum_partial((char *)diffs, sizeof(diffs),
112 oldcheck^0xFFFF)); 114 oldcheck^0xFFFF));
113} 115}
116EXPORT_SYMBOL(ip_nat_cheat_check);
114 117
115/* Is this tuple already taken? (not by us) */ 118/* Is this tuple already taken? (not by us) */
116int 119int
@@ -127,6 +130,7 @@ ip_nat_used_tuple(const struct ip_conntrack_tuple *tuple,
127 invert_tuplepr(&reply, tuple); 130 invert_tuplepr(&reply, tuple);
128 return ip_conntrack_tuple_taken(&reply, ignored_conntrack); 131 return ip_conntrack_tuple_taken(&reply, ignored_conntrack);
129} 132}
133EXPORT_SYMBOL(ip_nat_used_tuple);
130 134
131/* If we source map this tuple so reply looks like reply_tuple, will 135/* If we source map this tuple so reply looks like reply_tuple, will
132 * that meet the constraints of range. */ 136 * that meet the constraints of range. */
@@ -347,6 +351,7 @@ ip_nat_setup_info(struct ip_conntrack *conntrack,
347 351
348 return NF_ACCEPT; 352 return NF_ACCEPT;
349} 353}
354EXPORT_SYMBOL(ip_nat_setup_info);
350 355
351/* Returns true if succeeded. */ 356/* Returns true if succeeded. */
352static int 357static int
@@ -387,10 +392,10 @@ manip_pkt(u_int16_t proto,
387} 392}
388 393
389/* Do packet manipulations according to ip_nat_setup_info. */ 394/* Do packet manipulations according to ip_nat_setup_info. */
390unsigned int nat_packet(struct ip_conntrack *ct, 395unsigned int ip_nat_packet(struct ip_conntrack *ct,
391 enum ip_conntrack_info ctinfo, 396 enum ip_conntrack_info ctinfo,
392 unsigned int hooknum, 397 unsigned int hooknum,
393 struct sk_buff **pskb) 398 struct sk_buff **pskb)
394{ 399{
395 enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); 400 enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
396 unsigned long statusbit; 401 unsigned long statusbit;
@@ -417,12 +422,13 @@ unsigned int nat_packet(struct ip_conntrack *ct,
417 } 422 }
418 return NF_ACCEPT; 423 return NF_ACCEPT;
419} 424}
425EXPORT_SYMBOL_GPL(ip_nat_packet);
420 426
421/* Dir is direction ICMP is coming from (opposite to packet it contains) */ 427/* Dir is direction ICMP is coming from (opposite to packet it contains) */
422int icmp_reply_translation(struct sk_buff **pskb, 428int ip_nat_icmp_reply_translation(struct sk_buff **pskb,
423 struct ip_conntrack *ct, 429 struct ip_conntrack *ct,
424 enum ip_nat_manip_type manip, 430 enum ip_nat_manip_type manip,
425 enum ip_conntrack_dir dir) 431 enum ip_conntrack_dir dir)
426{ 432{
427 struct { 433 struct {
428 struct icmphdr icmp; 434 struct icmphdr icmp;
@@ -509,6 +515,7 @@ int icmp_reply_translation(struct sk_buff **pskb,
509 515
510 return 1; 516 return 1;
511} 517}
518EXPORT_SYMBOL_GPL(ip_nat_icmp_reply_translation);
512 519
513/* Protocol registration. */ 520/* Protocol registration. */
514int ip_nat_protocol_register(struct ip_nat_protocol *proto) 521int ip_nat_protocol_register(struct ip_nat_protocol *proto)
@@ -525,6 +532,7 @@ int ip_nat_protocol_register(struct ip_nat_protocol *proto)
525 write_unlock_bh(&ip_nat_lock); 532 write_unlock_bh(&ip_nat_lock);
526 return ret; 533 return ret;
527} 534}
535EXPORT_SYMBOL(ip_nat_protocol_register);
528 536
529/* Noone stores the protocol anywhere; simply delete it. */ 537/* Noone stores the protocol anywhere; simply delete it. */
530void ip_nat_protocol_unregister(struct ip_nat_protocol *proto) 538void ip_nat_protocol_unregister(struct ip_nat_protocol *proto)
@@ -536,6 +544,7 @@ void ip_nat_protocol_unregister(struct ip_nat_protocol *proto)
536 /* Someone could be still looking at the proto in a bh. */ 544 /* Someone could be still looking at the proto in a bh. */
537 synchronize_net(); 545 synchronize_net();
538} 546}
547EXPORT_SYMBOL(ip_nat_protocol_unregister);
539 548
540#if defined(CONFIG_IP_NF_CONNTRACK_NETLINK) || \ 549#if defined(CONFIG_IP_NF_CONNTRACK_NETLINK) || \
541 defined(CONFIG_IP_NF_CONNTRACK_NETLINK_MODULE) 550 defined(CONFIG_IP_NF_CONNTRACK_NETLINK_MODULE)
@@ -582,7 +591,7 @@ EXPORT_SYMBOL_GPL(ip_nat_port_nfattr_to_range);
582EXPORT_SYMBOL_GPL(ip_nat_port_range_to_nfattr); 591EXPORT_SYMBOL_GPL(ip_nat_port_range_to_nfattr);
583#endif 592#endif
584 593
585int __init ip_nat_init(void) 594static int __init ip_nat_init(void)
586{ 595{
587 size_t i; 596 size_t i;
588 597
@@ -624,10 +633,14 @@ static int clean_nat(struct ip_conntrack *i, void *data)
624 return 0; 633 return 0;
625} 634}
626 635
627/* Not __exit: called from ip_nat_standalone.c:init_or_cleanup() --RR */ 636static void __exit ip_nat_cleanup(void)
628void ip_nat_cleanup(void)
629{ 637{
630 ip_ct_iterate_cleanup(&clean_nat, NULL); 638 ip_ct_iterate_cleanup(&clean_nat, NULL);
631 ip_conntrack_destroyed = NULL; 639 ip_conntrack_destroyed = NULL;
632 vfree(bysource); 640 vfree(bysource);
633} 641}
642
643MODULE_LICENSE("GPL");
644
645module_init(ip_nat_init);
646module_exit(ip_nat_cleanup);
diff --git a/net/ipv4/netfilter/ip_nat_helper.c b/net/ipv4/netfilter/ip_nat_helper.c
index d2dd5d313556..5d506e0564d5 100644
--- a/net/ipv4/netfilter/ip_nat_helper.c
+++ b/net/ipv4/netfilter/ip_nat_helper.c
@@ -199,6 +199,7 @@ ip_nat_mangle_tcp_packet(struct sk_buff **pskb,
199 } 199 }
200 return 1; 200 return 1;
201} 201}
202EXPORT_SYMBOL(ip_nat_mangle_tcp_packet);
202 203
203/* Generic function for mangling variable-length address changes inside 204/* Generic function for mangling variable-length address changes inside
204 * NATed UDP connections (like the CONNECT DATA XXXXX MESG XXXXX INDEX XXXXX 205 * NATed UDP connections (like the CONNECT DATA XXXXX MESG XXXXX INDEX XXXXX
@@ -256,6 +257,7 @@ ip_nat_mangle_udp_packet(struct sk_buff **pskb,
256 257
257 return 1; 258 return 1;
258} 259}
260EXPORT_SYMBOL(ip_nat_mangle_udp_packet);
259 261
260/* Adjust one found SACK option including checksum correction */ 262/* Adjust one found SACK option including checksum correction */
261static void 263static void
@@ -399,6 +401,7 @@ ip_nat_seq_adjust(struct sk_buff **pskb,
399 401
400 return 1; 402 return 1;
401} 403}
404EXPORT_SYMBOL(ip_nat_seq_adjust);
402 405
403/* Setup NAT on this expected conntrack so it follows master. */ 406/* Setup NAT on this expected conntrack so it follows master. */
404/* If we fail to get a free NAT slot, we'll get dropped on confirm */ 407/* If we fail to get a free NAT slot, we'll get dropped on confirm */
@@ -425,3 +428,4 @@ void ip_nat_follow_master(struct ip_conntrack *ct,
425 /* hook doesn't matter, but it has to do destination manip */ 428 /* hook doesn't matter, but it has to do destination manip */
426 ip_nat_setup_info(ct, &range, NF_IP_PRE_ROUTING); 429 ip_nat_setup_info(ct, &range, NF_IP_PRE_ROUTING);
427} 430}
431EXPORT_SYMBOL(ip_nat_follow_master);
diff --git a/net/ipv4/netfilter/ip_nat_standalone.c b/net/ipv4/netfilter/ip_nat_standalone.c
index 0ff368b131f6..30cd4e18c129 100644
--- a/net/ipv4/netfilter/ip_nat_standalone.c
+++ b/net/ipv4/netfilter/ip_nat_standalone.c
@@ -108,8 +108,8 @@ ip_nat_fn(unsigned int hooknum,
108 case IP_CT_RELATED: 108 case IP_CT_RELATED:
109 case IP_CT_RELATED+IP_CT_IS_REPLY: 109 case IP_CT_RELATED+IP_CT_IS_REPLY:
110 if ((*pskb)->nh.iph->protocol == IPPROTO_ICMP) { 110 if ((*pskb)->nh.iph->protocol == IPPROTO_ICMP) {
111 if (!icmp_reply_translation(pskb, ct, maniptype, 111 if (!ip_nat_icmp_reply_translation(pskb, ct, maniptype,
112 CTINFO2DIR(ctinfo))) 112 CTINFO2DIR(ctinfo)))
113 return NF_DROP; 113 return NF_DROP;
114 else 114 else
115 return NF_ACCEPT; 115 return NF_ACCEPT;
@@ -152,7 +152,7 @@ ip_nat_fn(unsigned int hooknum,
152 } 152 }
153 153
154 IP_NF_ASSERT(info); 154 IP_NF_ASSERT(info);
155 return nat_packet(ct, ctinfo, hooknum, pskb); 155 return ip_nat_packet(ct, ctinfo, hooknum, pskb);
156} 156}
157 157
158static unsigned int 158static unsigned int
@@ -325,15 +325,10 @@ static int init_or_cleanup(int init)
325 printk("ip_nat_init: can't setup rules.\n"); 325 printk("ip_nat_init: can't setup rules.\n");
326 goto cleanup_nothing; 326 goto cleanup_nothing;
327 } 327 }
328 ret = ip_nat_init();
329 if (ret < 0) {
330 printk("ip_nat_init: can't setup rules.\n");
331 goto cleanup_rule_init;
332 }
333 ret = nf_register_hook(&ip_nat_in_ops); 328 ret = nf_register_hook(&ip_nat_in_ops);
334 if (ret < 0) { 329 if (ret < 0) {
335 printk("ip_nat_init: can't register in hook.\n"); 330 printk("ip_nat_init: can't register in hook.\n");
336 goto cleanup_nat; 331 goto cleanup_rule_init;
337 } 332 }
338 ret = nf_register_hook(&ip_nat_out_ops); 333 ret = nf_register_hook(&ip_nat_out_ops);
339 if (ret < 0) { 334 if (ret < 0) {
@@ -374,8 +369,6 @@ static int init_or_cleanup(int init)
374 nf_unregister_hook(&ip_nat_out_ops); 369 nf_unregister_hook(&ip_nat_out_ops);
375 cleanup_inops: 370 cleanup_inops:
376 nf_unregister_hook(&ip_nat_in_ops); 371 nf_unregister_hook(&ip_nat_in_ops);
377 cleanup_nat:
378 ip_nat_cleanup();
379 cleanup_rule_init: 372 cleanup_rule_init:
380 ip_nat_rule_cleanup(); 373 ip_nat_rule_cleanup();
381 cleanup_nothing: 374 cleanup_nothing:
@@ -395,14 +388,4 @@ static void __exit fini(void)
395module_init(init); 388module_init(init);
396module_exit(fini); 389module_exit(fini);
397 390
398EXPORT_SYMBOL(ip_nat_setup_info);
399EXPORT_SYMBOL(ip_nat_protocol_register);
400EXPORT_SYMBOL(ip_nat_protocol_unregister);
401EXPORT_SYMBOL_GPL(ip_nat_proto_find_get);
402EXPORT_SYMBOL_GPL(ip_nat_proto_put);
403EXPORT_SYMBOL(ip_nat_cheat_check);
404EXPORT_SYMBOL(ip_nat_mangle_tcp_packet);
405EXPORT_SYMBOL(ip_nat_mangle_udp_packet);
406EXPORT_SYMBOL(ip_nat_used_tuple);
407EXPORT_SYMBOL(ip_nat_follow_master);
408MODULE_LICENSE("GPL"); 391MODULE_LICENSE("GPL");
diff --git a/net/ipv4/netfilter/ip_queue.c b/net/ipv4/netfilter/ip_queue.c
index d54f14d926f6..36339eb39e17 100644
--- a/net/ipv4/netfilter/ip_queue.c
+++ b/net/ipv4/netfilter/ip_queue.c
@@ -240,8 +240,8 @@ ipq_build_packet_message(struct ipq_queue_entry *entry, int *errp)
240 240
241 pmsg->packet_id = (unsigned long )entry; 241 pmsg->packet_id = (unsigned long )entry;
242 pmsg->data_len = data_len; 242 pmsg->data_len = data_len;
243 pmsg->timestamp_sec = skb_tv_base.tv_sec + entry->skb->tstamp.off_sec; 243 pmsg->timestamp_sec = entry->skb->tstamp.off_sec;
244 pmsg->timestamp_usec = skb_tv_base.tv_usec + entry->skb->tstamp.off_usec; 244 pmsg->timestamp_usec = entry->skb->tstamp.off_usec;
245 pmsg->mark = entry->skb->nfmark; 245 pmsg->mark = entry->skb->nfmark;
246 pmsg->hook = entry->info->hook; 246 pmsg->hook = entry->info->hook;
247 pmsg->hw_protocol = entry->skb->protocol; 247 pmsg->hw_protocol = entry->skb->protocol;
diff --git a/net/ipv4/netfilter/ipt_REDIRECT.c b/net/ipv4/netfilter/ipt_REDIRECT.c
index 715cb613405c..5245bfd33d52 100644
--- a/net/ipv4/netfilter/ipt_REDIRECT.c
+++ b/net/ipv4/netfilter/ipt_REDIRECT.c
@@ -93,7 +93,7 @@ redirect_target(struct sk_buff **pskb,
93 newdst = 0; 93 newdst = 0;
94 94
95 rcu_read_lock(); 95 rcu_read_lock();
96 indev = __in_dev_get((*pskb)->dev); 96 indev = __in_dev_get_rcu((*pskb)->dev);
97 if (indev && (ifa = indev->ifa_list)) 97 if (indev && (ifa = indev->ifa_list))
98 newdst = ifa->ifa_local; 98 newdst = ifa->ifa_local;
99 rcu_read_unlock(); 99 rcu_read_unlock();
diff --git a/net/ipv4/netfilter/ipt_ULOG.c b/net/ipv4/netfilter/ipt_ULOG.c
index e2c14f3cb2fc..2883ccd8a91d 100644
--- a/net/ipv4/netfilter/ipt_ULOG.c
+++ b/net/ipv4/netfilter/ipt_ULOG.c
@@ -225,8 +225,8 @@ static void ipt_ulog_packet(unsigned int hooknum,
225 225
226 /* copy hook, prefix, timestamp, payload, etc. */ 226 /* copy hook, prefix, timestamp, payload, etc. */
227 pm->data_len = copy_len; 227 pm->data_len = copy_len;
228 pm->timestamp_sec = skb_tv_base.tv_sec + skb->tstamp.off_sec; 228 pm->timestamp_sec = skb->tstamp.off_sec;
229 pm->timestamp_usec = skb_tv_base.tv_usec + skb->tstamp.off_usec; 229 pm->timestamp_usec = skb->tstamp.off_usec;
230 pm->mark = skb->nfmark; 230 pm->mark = skb->nfmark;
231 pm->hook = hooknum; 231 pm->hook = hooknum;
232 if (prefix != NULL) 232 if (prefix != NULL)
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 8549f26e2495..381dd6a6aebb 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -2128,7 +2128,7 @@ int ip_route_input(struct sk_buff *skb, u32 daddr, u32 saddr,
2128 struct in_device *in_dev; 2128 struct in_device *in_dev;
2129 2129
2130 rcu_read_lock(); 2130 rcu_read_lock();
2131 if ((in_dev = __in_dev_get(dev)) != NULL) { 2131 if ((in_dev = __in_dev_get_rcu(dev)) != NULL) {
2132 int our = ip_check_mc(in_dev, daddr, saddr, 2132 int our = ip_check_mc(in_dev, daddr, saddr,
2133 skb->nh.iph->protocol); 2133 skb->nh.iph->protocol);
2134 if (our 2134 if (our
@@ -2443,7 +2443,9 @@ static int ip_route_output_slow(struct rtable **rp, const struct flowi *oldflp)
2443 err = -ENODEV; 2443 err = -ENODEV;
2444 if (dev_out == NULL) 2444 if (dev_out == NULL)
2445 goto out; 2445 goto out;
2446 if (__in_dev_get(dev_out) == NULL) { 2446
2447 /* RACE: Check return value of inet_select_addr instead. */
2448 if (__in_dev_get_rtnl(dev_out) == NULL) {
2447 dev_put(dev_out); 2449 dev_put(dev_out);
2448 goto out; /* Wrong error code */ 2450 goto out; /* Wrong error code */
2449 } 2451 }
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index a7537c7bbd06..677419d0c9ad 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -355,8 +355,6 @@ static void tcp_clamp_window(struct sock *sk, struct tcp_sock *tp)
355 app_win -= icsk->icsk_ack.rcv_mss; 355 app_win -= icsk->icsk_ack.rcv_mss;
356 app_win = max(app_win, 2U*tp->advmss); 356 app_win = max(app_win, 2U*tp->advmss);
357 357
358 if (!ofo_win)
359 tp->window_clamp = min(tp->window_clamp, app_win);
360 tp->rcv_ssthresh = min(tp->window_clamp, 2U*tp->advmss); 358 tp->rcv_ssthresh = min(tp->window_clamp, 2U*tp->advmss);
361 } 359 }
362} 360}
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 13dfb391cdf1..c85819d8474b 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -130,19 +130,20 @@ static int __tcp_v4_check_established(struct sock *sk, __u16 lport,
130 int dif = sk->sk_bound_dev_if; 130 int dif = sk->sk_bound_dev_if;
131 INET_ADDR_COOKIE(acookie, saddr, daddr) 131 INET_ADDR_COOKIE(acookie, saddr, daddr)
132 const __u32 ports = INET_COMBINED_PORTS(inet->dport, lport); 132 const __u32 ports = INET_COMBINED_PORTS(inet->dport, lport);
133 const int hash = inet_ehashfn(daddr, lport, saddr, inet->dport, tcp_hashinfo.ehash_size); 133 unsigned int hash = inet_ehashfn(daddr, lport, saddr, inet->dport);
134 struct inet_ehash_bucket *head = &tcp_hashinfo.ehash[hash]; 134 struct inet_ehash_bucket *head = inet_ehash_bucket(&tcp_hashinfo, hash);
135 struct sock *sk2; 135 struct sock *sk2;
136 const struct hlist_node *node; 136 const struct hlist_node *node;
137 struct inet_timewait_sock *tw; 137 struct inet_timewait_sock *tw;
138 138
139 prefetch(head->chain.first);
139 write_lock(&head->lock); 140 write_lock(&head->lock);
140 141
141 /* Check TIME-WAIT sockets first. */ 142 /* Check TIME-WAIT sockets first. */
142 sk_for_each(sk2, node, &(head + tcp_hashinfo.ehash_size)->chain) { 143 sk_for_each(sk2, node, &(head + tcp_hashinfo.ehash_size)->chain) {
143 tw = inet_twsk(sk2); 144 tw = inet_twsk(sk2);
144 145
145 if (INET_TW_MATCH(sk2, acookie, saddr, daddr, ports, dif)) { 146 if (INET_TW_MATCH(sk2, hash, acookie, saddr, daddr, ports, dif)) {
146 const struct tcp_timewait_sock *tcptw = tcp_twsk(sk2); 147 const struct tcp_timewait_sock *tcptw = tcp_twsk(sk2);
147 struct tcp_sock *tp = tcp_sk(sk); 148 struct tcp_sock *tp = tcp_sk(sk);
148 149
@@ -179,7 +180,7 @@ static int __tcp_v4_check_established(struct sock *sk, __u16 lport,
179 180
180 /* And established part... */ 181 /* And established part... */
181 sk_for_each(sk2, node, &head->chain) { 182 sk_for_each(sk2, node, &head->chain) {
182 if (INET_MATCH(sk2, acookie, saddr, daddr, ports, dif)) 183 if (INET_MATCH(sk2, hash, acookie, saddr, daddr, ports, dif))
183 goto not_unique; 184 goto not_unique;
184 } 185 }
185 186
@@ -188,7 +189,7 @@ unique:
188 * in hash table socket with a funny identity. */ 189 * in hash table socket with a funny identity. */
189 inet->num = lport; 190 inet->num = lport;
190 inet->sport = htons(lport); 191 inet->sport = htons(lport);
191 sk->sk_hashent = hash; 192 sk->sk_hash = hash;
192 BUG_TRAP(sk_unhashed(sk)); 193 BUG_TRAP(sk_unhashed(sk));
193 __sk_add_node(sk, &head->chain); 194 __sk_add_node(sk, &head->chain);
194 sock_prot_inc_use(sk->sk_prot); 195 sock_prot_inc_use(sk->sk_prot);
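
Note: with the full hash now kept in sk->sk_hash, the INET_MATCH/INET_TW_MATCH checks above take the hash as an extra argument and can compare that cached word before touching addresses, ports, and bound device, so most non-matching sockets on a chain are rejected with a single load. A sketch of the idea; the helper below is hypothetical, not the real INET_MATCH macro:

	/* Cheap first-level filter before the full address/port/ifindex compare. */
	static inline int example_ehash_candidate(const struct sock *sk,
						  unsigned int hash)
	{
		return sk->sk_hash == hash;
	}
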
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 5dd6dd7d091e..c5b911f9b662 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -190,7 +190,7 @@ void tcp_select_initial_window(int __space, __u32 mss,
190 } 190 }
191 191
192 /* Set initial window to value enough for senders, 192 /* Set initial window to value enough for senders,
193 * following RFC1414. Senders, not following this RFC, 193 * following RFC2414. Senders, not following this RFC,
194 * will be satisfied with 2. 194 * will be satisfied with 2.
195 */ 195 */
196 if (mss > (1<<*rcv_wscale)) { 196 if (mss > (1<<*rcv_wscale)) {
@@ -509,7 +509,16 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, unsigned int mss
509 tp->lost_out -= diff; 509 tp->lost_out -= diff;
510 tp->left_out -= diff; 510 tp->left_out -= diff;
511 } 511 }
512
512 if (diff > 0) { 513 if (diff > 0) {
514 /* Adjust Reno SACK estimate. */
515 if (!tp->rx_opt.sack_ok) {
516 tp->sacked_out -= diff;
517 if ((int)tp->sacked_out < 0)
518 tp->sacked_out = 0;
519 tcp_sync_left_out(tp);
520 }
521
513 tp->fackets_out -= diff; 522 tp->fackets_out -= diff;
514 if ((int)tp->fackets_out < 0) 523 if ((int)tp->fackets_out < 0)
515 tp->fackets_out = 0; 524 tp->fackets_out = 0;
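
Note: when tcp_fragment() trims a retransmit-queue skb whose segments were already counted as sacked, the hunk above now shrinks sacked_out by the same amount on non-SACK (Reno) connections and re-derives left_out via tcp_sync_left_out(), keeping the in-flight accounting consistent. A sketch of the invariant being restored, assuming the usual definition of left_out:

	/* left_out must track sacked_out + lost_out after any adjustment. */
	static inline void example_sync_left_out(struct tcp_sock *tp)
	{
		tp->left_out = tp->sacked_out + tp->lost_out;
	}
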
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 2fea3f4402a0..a970b4727ce8 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -1806,7 +1806,7 @@ static void sit_add_v4_addrs(struct inet6_dev *idev)
1806 } 1806 }
1807 1807
1808 for (dev = dev_base; dev != NULL; dev = dev->next) { 1808 for (dev = dev_base; dev != NULL; dev = dev->next) {
1809 struct in_device * in_dev = __in_dev_get(dev); 1809 struct in_device * in_dev = __in_dev_get_rtnl(dev);
1810 if (in_dev && (dev->flags & IFF_UP)) { 1810 if (in_dev && (dev->flags & IFF_UP)) {
1811 struct in_ifaddr * ifa; 1811 struct in_ifaddr * ifa;
1812 1812
@@ -3520,6 +3520,8 @@ int __init addrconf_init(void)
3520 if (err) 3520 if (err)
3521 return err; 3521 return err;
3522 3522
3523 ip6_null_entry.rt6i_idev = in6_dev_get(&loopback_dev);
3524
3523 register_netdevice_notifier(&ipv6_dev_notf); 3525 register_netdevice_notifier(&ipv6_dev_notf);
3524 3526
3525#ifdef CONFIG_IPV6_PRIVACY 3527#ifdef CONFIG_IPV6_PRIVACY
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 2f589f24c093..563b442ffab8 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -666,7 +666,7 @@ slow_path:
666 */ 666 */
667 fh->nexthdr = nexthdr; 667 fh->nexthdr = nexthdr;
668 fh->reserved = 0; 668 fh->reserved = 0;
669 if (frag_id) { 669 if (!frag_id) {
670 ipv6_select_ident(skb, fh); 670 ipv6_select_ident(skb, fh);
671 frag_id = fh->identification; 671 frag_id = fh->identification;
672 } else 672 } else
diff --git a/net/ipv6/netfilter/Kconfig b/net/ipv6/netfilter/Kconfig
index 216fbe1ac65c..bb7ccfe33f23 100644
--- a/net/ipv6/netfilter/Kconfig
+++ b/net/ipv6/netfilter/Kconfig
@@ -209,6 +209,17 @@ config IP6_NF_TARGET_REJECT
209 209
210 To compile it as a module, choose M here. If unsure, say N. 210 To compile it as a module, choose M here. If unsure, say N.
211 211
212config IP6_NF_TARGET_NFQUEUE
213 tristate "NFQUEUE Target Support"
214 depends on IP_NF_IPTABLES
215 help
216 This Target replaced the old obsolete QUEUE target.
217
218 As opposed to QUEUE, it supports 65535 different queues,
219 not just one.
220
221 To compile it as a module, choose M here. If unsure, say N.
222
212# if [ "$CONFIG_IP6_NF_FILTER" != "n" ]; then 223# if [ "$CONFIG_IP6_NF_FILTER" != "n" ]; then
213# dep_tristate ' REJECT target support' CONFIG_IP6_NF_TARGET_REJECT $CONFIG_IP6_NF_FILTER 224# dep_tristate ' REJECT target support' CONFIG_IP6_NF_TARGET_REJECT $CONFIG_IP6_NF_FILTER
214# if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then 225# if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then
diff --git a/net/ipv6/netfilter/Makefile b/net/ipv6/netfilter/Makefile
index bd9a16a5cbba..2b2c370e8b1c 100644
--- a/net/ipv6/netfilter/Makefile
+++ b/net/ipv6/netfilter/Makefile
@@ -21,9 +21,9 @@ obj-$(CONFIG_IP6_NF_FILTER) += ip6table_filter.o
21obj-$(CONFIG_IP6_NF_MANGLE) += ip6table_mangle.o 21obj-$(CONFIG_IP6_NF_MANGLE) += ip6table_mangle.o
22obj-$(CONFIG_IP6_NF_TARGET_MARK) += ip6t_MARK.o 22obj-$(CONFIG_IP6_NF_TARGET_MARK) += ip6t_MARK.o
23obj-$(CONFIG_IP6_NF_TARGET_HL) += ip6t_HL.o 23obj-$(CONFIG_IP6_NF_TARGET_HL) += ip6t_HL.o
24obj-$(CONFIG_IP6_NF_TARGET_NFQUEUE) += ip6t_NFQUEUE.o
24obj-$(CONFIG_IP6_NF_QUEUE) += ip6_queue.o 25obj-$(CONFIG_IP6_NF_QUEUE) += ip6_queue.o
25obj-$(CONFIG_IP6_NF_TARGET_LOG) += ip6t_LOG.o 26obj-$(CONFIG_IP6_NF_TARGET_LOG) += ip6t_LOG.o
26obj-$(CONFIG_IP6_NF_RAW) += ip6table_raw.o 27obj-$(CONFIG_IP6_NF_RAW) += ip6table_raw.o
27obj-$(CONFIG_IP6_NF_MATCH_HL) += ip6t_hl.o 28obj-$(CONFIG_IP6_NF_MATCH_HL) += ip6t_hl.o
28obj-$(CONFIG_IP6_NF_TARGET_REJECT) += ip6t_REJECT.o 29obj-$(CONFIG_IP6_NF_TARGET_REJECT) += ip6t_REJECT.o
29obj-$(CONFIG_NETFILTER_NETLINK_QUEUE) += ip6t_NFQUEUE.o
diff --git a/net/ipv6/netfilter/ip6_queue.c b/net/ipv6/netfilter/ip6_queue.c
index aa11cf366efa..5027bbe6415e 100644
--- a/net/ipv6/netfilter/ip6_queue.c
+++ b/net/ipv6/netfilter/ip6_queue.c
@@ -238,8 +238,8 @@ ipq_build_packet_message(struct ipq_queue_entry *entry, int *errp)
238 238
239 pmsg->packet_id = (unsigned long )entry; 239 pmsg->packet_id = (unsigned long )entry;
240 pmsg->data_len = data_len; 240 pmsg->data_len = data_len;
241 pmsg->timestamp_sec = skb_tv_base.tv_sec + entry->skb->tstamp.off_sec; 241 pmsg->timestamp_sec = entry->skb->tstamp.off_sec;
242 pmsg->timestamp_usec = skb_tv_base.tv_usec + entry->skb->tstamp.off_usec; 242 pmsg->timestamp_usec = entry->skb->tstamp.off_usec;
243 pmsg->mark = entry->skb->nfmark; 243 pmsg->mark = entry->skb->nfmark;
244 pmsg->hook = entry->info->hook; 244 pmsg->hook = entry->info->hook;
245 pmsg->hw_protocol = entry->skb->protocol; 245 pmsg->hw_protocol = entry->skb->protocol;
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 80643e6b346b..d693cb988b78 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -209,9 +209,11 @@ static __inline__ void __tcp_v6_hash(struct sock *sk)
209 lock = &tcp_hashinfo.lhash_lock; 209 lock = &tcp_hashinfo.lhash_lock;
210 inet_listen_wlock(&tcp_hashinfo); 210 inet_listen_wlock(&tcp_hashinfo);
211 } else { 211 } else {
212 sk->sk_hashent = inet6_sk_ehashfn(sk, tcp_hashinfo.ehash_size); 212 unsigned int hash;
213 list = &tcp_hashinfo.ehash[sk->sk_hashent].chain; 213 sk->sk_hash = hash = inet6_sk_ehashfn(sk);
214 lock = &tcp_hashinfo.ehash[sk->sk_hashent].lock; 214 hash &= (tcp_hashinfo.ehash_size - 1);
215 list = &tcp_hashinfo.ehash[hash].chain;
216 lock = &tcp_hashinfo.ehash[hash].lock;
215 write_lock(lock); 217 write_lock(lock);
216 } 218 }
217 219
@@ -322,13 +324,13 @@ static int __tcp_v6_check_established(struct sock *sk, const __u16 lport,
322 const struct in6_addr *saddr = &np->daddr; 324 const struct in6_addr *saddr = &np->daddr;
323 const int dif = sk->sk_bound_dev_if; 325 const int dif = sk->sk_bound_dev_if;
324 const u32 ports = INET_COMBINED_PORTS(inet->dport, lport); 326 const u32 ports = INET_COMBINED_PORTS(inet->dport, lport);
325 const int hash = inet6_ehashfn(daddr, inet->num, saddr, inet->dport, 327 unsigned int hash = inet6_ehashfn(daddr, inet->num, saddr, inet->dport);
326 tcp_hashinfo.ehash_size); 328 struct inet_ehash_bucket *head = inet_ehash_bucket(&tcp_hashinfo, hash);
327 struct inet_ehash_bucket *head = &tcp_hashinfo.ehash[hash];
328 struct sock *sk2; 329 struct sock *sk2;
329 const struct hlist_node *node; 330 const struct hlist_node *node;
330 struct inet_timewait_sock *tw; 331 struct inet_timewait_sock *tw;
331 332
333 prefetch(head->chain.first);
332 write_lock(&head->lock); 334 write_lock(&head->lock);
333 335
334 /* Check TIME-WAIT sockets first. */ 336 /* Check TIME-WAIT sockets first. */
@@ -365,14 +367,14 @@ static int __tcp_v6_check_established(struct sock *sk, const __u16 lport,
365 367
366 /* And established part... */ 368 /* And established part... */
367 sk_for_each(sk2, node, &head->chain) { 369 sk_for_each(sk2, node, &head->chain) {
368 if (INET6_MATCH(sk2, saddr, daddr, ports, dif)) 370 if (INET6_MATCH(sk2, hash, saddr, daddr, ports, dif))
369 goto not_unique; 371 goto not_unique;
370 } 372 }
371 373
372unique: 374unique:
373 BUG_TRAP(sk_unhashed(sk)); 375 BUG_TRAP(sk_unhashed(sk));
374 __sk_add_node(sk, &head->chain); 376 __sk_add_node(sk, &head->chain);
375 sk->sk_hashent = hash; 377 sk->sk_hash = hash;
376 sock_prot_inc_use(sk->sk_prot); 378 sock_prot_inc_use(sk->sk_prot);
377 write_unlock(&head->lock); 379 write_unlock(&head->lock);
378 380
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 6001948600f3..e4cad11f284a 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -852,10 +852,16 @@ do_append_data:
852 else if (!corkreq) 852 else if (!corkreq)
853 err = udp_v6_push_pending_frames(sk, up); 853 err = udp_v6_push_pending_frames(sk, up);
854 854
855 if (dst && connected) 855 if (dst) {
856 ip6_dst_store(sk, dst, 856 if (connected) {
857 ipv6_addr_equal(&fl->fl6_dst, &np->daddr) ? 857 ip6_dst_store(sk, dst,
858 &np->daddr : NULL); 858 ipv6_addr_equal(&fl->fl6_dst, &np->daddr) ?
859 &np->daddr : NULL);
860 } else {
861 dst_release(dst);
862 }
863 }
864
859 if (err > 0) 865 if (err > 0)
860 err = np->recverr ? net_xmit_errno(err) : 0; 866 err = np->recverr ? net_xmit_errno(err) : 0;
861 release_sock(sk); 867 release_sock(sk);
diff --git a/net/irda/irlan/irlan_eth.c b/net/irda/irlan/irlan_eth.c
index 071cd2cefd8a..953e255d2bc8 100644
--- a/net/irda/irlan/irlan_eth.c
+++ b/net/irda/irlan/irlan_eth.c
@@ -310,7 +310,7 @@ void irlan_eth_send_gratuitous_arp(struct net_device *dev)
310#ifdef CONFIG_INET 310#ifdef CONFIG_INET
311 IRDA_DEBUG(4, "IrLAN: Sending gratuitous ARP\n"); 311 IRDA_DEBUG(4, "IrLAN: Sending gratuitous ARP\n");
312 rcu_read_lock(); 312 rcu_read_lock();
313 in_dev = __in_dev_get(dev); 313 in_dev = __in_dev_get_rcu(dev);
314 if (in_dev == NULL) 314 if (in_dev == NULL)
315 goto out; 315 goto out;
316 if (in_dev->ifa_list) 316 if (in_dev->ifa_list)
diff --git a/net/irda/irttp.c b/net/irda/irttp.c
index 6602d901f8b1..8aff254cb418 100644
--- a/net/irda/irttp.c
+++ b/net/irda/irttp.c
@@ -38,7 +38,7 @@
38#include <net/irda/parameters.h> 38#include <net/irda/parameters.h>
39#include <net/irda/irttp.h> 39#include <net/irda/irttp.h>
40 40
41static struct irttp_cb *irttp = NULL; 41static struct irttp_cb *irttp;
42 42
43static void __irttp_close_tsap(struct tsap_cb *self); 43static void __irttp_close_tsap(struct tsap_cb *self);
44 44
@@ -86,12 +86,9 @@ static pi_param_info_t param_info = { pi_major_call_table, 1, 0x0f, 4 };
86 */ 86 */
87int __init irttp_init(void) 87int __init irttp_init(void)
88{ 88{
89 /* Initialize the irttp structure. */ 89 irttp = kmalloc(sizeof(struct irttp_cb), GFP_KERNEL);
90 if (irttp == NULL) { 90 if (irttp == NULL)
91 irttp = kmalloc(sizeof(struct irttp_cb), GFP_KERNEL); 91 return -ENOMEM;
92 if (irttp == NULL)
93 return -ENOMEM;
94 }
95 memset(irttp, 0, sizeof(struct irttp_cb)); 92 memset(irttp, 0, sizeof(struct irttp_cb));
96 93
97 irttp->magic = TTP_MAGIC; 94 irttp->magic = TTP_MAGIC;
@@ -100,6 +97,7 @@ int __init irttp_init(void)
100 if (!irttp->tsaps) { 97 if (!irttp->tsaps) {
101 IRDA_ERROR("%s: can't allocate IrTTP hashbin!\n", 98 IRDA_ERROR("%s: can't allocate IrTTP hashbin!\n",
102 __FUNCTION__); 99 __FUNCTION__);
100 kfree(irttp);
103 return -ENOMEM; 101 return -ENOMEM;
104 } 102 }
105 103
@@ -115,7 +113,6 @@ int __init irttp_init(void)
115void __exit irttp_cleanup(void) 113void __exit irttp_cleanup(void)
116{ 114{
117 /* Check for main structure */ 115 /* Check for main structure */
118 IRDA_ASSERT(irttp != NULL, return;);
119 IRDA_ASSERT(irttp->magic == TTP_MAGIC, return;); 116 IRDA_ASSERT(irttp->magic == TTP_MAGIC, return;);
120 117
121 /* 118 /*
@@ -382,7 +379,6 @@ struct tsap_cb *irttp_open_tsap(__u8 stsap_sel, int credit, notify_t *notify)
382 struct lsap_cb *lsap; 379 struct lsap_cb *lsap;
383 notify_t ttp_notify; 380 notify_t ttp_notify;
384 381
385 IRDA_ASSERT(irttp != NULL, return NULL;);
386 IRDA_ASSERT(irttp->magic == TTP_MAGIC, return NULL;); 382 IRDA_ASSERT(irttp->magic == TTP_MAGIC, return NULL;);
387 383
388 /* The IrLMP spec (IrLMP 1.1 p10) says that we have the right to 384 /* The IrLMP spec (IrLMP 1.1 p10) says that we have the right to
@@ -1880,8 +1876,6 @@ static int irttp_seq_open(struct inode *inode, struct file *file)
1880 struct seq_file *seq; 1876 struct seq_file *seq;
1881 int rc = -ENOMEM; 1877 int rc = -ENOMEM;
1882 struct irttp_iter_state *s; 1878 struct irttp_iter_state *s;
1883
1884 IRDA_ASSERT(irttp != NULL, return -EINVAL;);
1885 1879
1886 s = kmalloc(sizeof(*s), GFP_KERNEL); 1880 s = kmalloc(sizeof(*s), GFP_KERNEL);
1887 if (!s) 1881 if (!s)
diff --git a/net/llc/Makefile b/net/llc/Makefile
index 5ebd4ed2bd42..4e260cff3c5d 100644
--- a/net/llc/Makefile
+++ b/net/llc/Makefile
@@ -22,3 +22,4 @@ llc2-y := llc_if.o llc_c_ev.o llc_c_ac.o llc_conn.o llc_c_st.o llc_pdu.o \
22 llc_sap.o llc_s_ac.o llc_s_ev.o llc_s_st.o af_llc.o llc_station.o 22 llc_sap.o llc_s_ac.o llc_s_ev.o llc_s_st.o af_llc.o llc_station.o
23 23
24llc2-$(CONFIG_PROC_FS) += llc_proc.o 24llc2-$(CONFIG_PROC_FS) += llc_proc.o
25llc2-$(CONFIG_SYSCTL) += sysctl_net_llc.o
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index 66f55e514b56..59d02cbbeb9e 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -21,6 +21,7 @@
21 * See the GNU General Public License for more details. 21 * See the GNU General Public License for more details.
22 */ 22 */
23#include <linux/config.h> 23#include <linux/config.h>
24#include <linux/compiler.h>
24#include <linux/kernel.h> 25#include <linux/kernel.h>
25#include <linux/module.h> 26#include <linux/module.h>
26#include <linux/rtnetlink.h> 27#include <linux/rtnetlink.h>
@@ -37,10 +38,9 @@ static u16 llc_ui_sap_link_no_max[256];
37static struct sockaddr_llc llc_ui_addrnull; 38static struct sockaddr_llc llc_ui_addrnull;
38static struct proto_ops llc_ui_ops; 39static struct proto_ops llc_ui_ops;
39 40
40static int llc_ui_wait_for_conn(struct sock *sk, int timeout); 41static int llc_ui_wait_for_conn(struct sock *sk, long timeout);
41static int llc_ui_wait_for_disc(struct sock *sk, int timeout); 42static int llc_ui_wait_for_disc(struct sock *sk, long timeout);
42static int llc_ui_wait_for_data(struct sock *sk, int timeout); 43static int llc_ui_wait_for_busy_core(struct sock *sk, long timeout);
43static int llc_ui_wait_for_busy_core(struct sock *sk, int timeout);
44 44
45#if 0 45#if 0
46#define dprintk(args...) printk(KERN_DEBUG args) 46#define dprintk(args...) printk(KERN_DEBUG args)
@@ -116,12 +116,12 @@ static int llc_ui_send_data(struct sock* sk, struct sk_buff *skb, int noblock)
116 struct llc_sock* llc = llc_sk(sk); 116 struct llc_sock* llc = llc_sk(sk);
117 int rc = 0; 117 int rc = 0;
118 118
119 if (llc_data_accept_state(llc->state) || llc->p_flag) { 119 if (unlikely(llc_data_accept_state(llc->state) || llc->p_flag)) {
120 int timeout = sock_sndtimeo(sk, noblock); 120 long timeout = sock_sndtimeo(sk, noblock);
121 121
122 rc = llc_ui_wait_for_busy_core(sk, timeout); 122 rc = llc_ui_wait_for_busy_core(sk, timeout);
123 } 123 }
124 if (!rc) 124 if (unlikely(!rc))
125 rc = llc_build_and_send_pkt(sk, skb); 125 rc = llc_build_and_send_pkt(sk, skb);
126 return rc; 126 return rc;
127} 127}
@@ -155,7 +155,7 @@ static int llc_ui_create(struct socket *sock, int protocol)
155 struct sock *sk; 155 struct sock *sk;
156 int rc = -ESOCKTNOSUPPORT; 156 int rc = -ESOCKTNOSUPPORT;
157 157
158 if (sock->type == SOCK_DGRAM || sock->type == SOCK_STREAM) { 158 if (likely(sock->type == SOCK_DGRAM || sock->type == SOCK_STREAM)) {
159 rc = -ENOMEM; 159 rc = -ENOMEM;
160 sk = llc_sk_alloc(PF_LLC, GFP_KERNEL, &llc_proto); 160 sk = llc_sk_alloc(PF_LLC, GFP_KERNEL, &llc_proto);
161 if (sk) { 161 if (sk) {
@@ -177,7 +177,7 @@ static int llc_ui_release(struct socket *sock)
177 struct sock *sk = sock->sk; 177 struct sock *sk = sock->sk;
178 struct llc_sock *llc; 178 struct llc_sock *llc;
179 179
180 if (!sk) 180 if (unlikely(sk == NULL))
181 goto out; 181 goto out;
182 sock_hold(sk); 182 sock_hold(sk);
183 lock_sock(sk); 183 lock_sock(sk);
@@ -189,10 +189,6 @@ static int llc_ui_release(struct socket *sock)
189 if (!sock_flag(sk, SOCK_ZAPPED)) 189 if (!sock_flag(sk, SOCK_ZAPPED))
190 llc_sap_remove_socket(llc->sap, sk); 190 llc_sap_remove_socket(llc->sap, sk);
191 release_sock(sk); 191 release_sock(sk);
192 if (llc->sap && hlist_empty(&llc->sap->sk_list.list)) {
193 llc_release_sockets(llc->sap);
194 llc_sap_close(llc->sap);
195 }
196 if (llc->dev) 192 if (llc->dev)
197 dev_put(llc->dev); 193 dev_put(llc->dev);
198 sock_put(sk); 194 sock_put(sk);
@@ -221,6 +217,7 @@ static int llc_ui_autoport(void)
221 llc_ui_sap_last_autoport = i + 2; 217 llc_ui_sap_last_autoport = i + 2;
222 goto out; 218 goto out;
223 } 219 }
220 llc_sap_put(sap);
224 } 221 }
225 llc_ui_sap_last_autoport = LLC_SAP_DYN_START; 222 llc_ui_sap_last_autoport = LLC_SAP_DYN_START;
226 tries++; 223 tries++;
@@ -231,20 +228,13 @@ out:
231} 228}
232 229
233/** 230/**
234 * llc_ui_autobind - Bind a socket to a specific address. 231 * llc_ui_autobind - automatically bind a socket to a sap
235 * @sk: Socket to bind an address to. 232 * @sock: socket to bind
236 * @addr: Address the user wants the socket bound to. 233 * @addr: address to connect to
234 *
235 * Used by llc_ui_connect and llc_ui_sendmsg when the user hasn't
 235 * specifically used llc_ui_bind to bind to a specific address/sap
237 * 237 *
238 * Bind a socket to a specific address. For llc a user is able to bind to
239 * a specific sap only or mac + sap. If the user only specifies a sap and
240 * a null dmac (all zeros) the user is attempting to bind to an entire
241 * sap. This will stop anyone else on the local system from using that
242 * sap. If someone else has a mac + sap open the bind to null + sap will
243 * fail.
244 * If the user desires to bind to a specific mac + sap, it is possible to
245 * have multiple sap connections via multiple macs.
246 * Bind and autobind for that matter must enforce the correct sap usage
247 * otherwise all hell will break loose.
248 * Returns: 0 upon success, negative otherwise. 238 * Returns: 0 upon success, negative otherwise.
249 */ 239 */
250static int llc_ui_autobind(struct socket *sock, struct sockaddr_llc *addr) 240static int llc_ui_autobind(struct socket *sock, struct sockaddr_llc *addr)
@@ -285,11 +275,7 @@ out:
285 * @addrlen: Length of the uaddr structure. 275 * @addrlen: Length of the uaddr structure.
286 * 276 *
287 * Bind a socket to a specific address. For llc a user is able to bind to 277 * Bind a socket to a specific address. For llc a user is able to bind to
288 * a specific sap only or mac + sap. If the user only specifies a sap and 278 * a specific sap only or mac + sap.
289 * a null dmac (all zeros) the user is attempting to bind to an entire
290 * sap. This will stop anyone else on the local system from using that
291 * sap. If someone else has a mac + sap open the bind to null + sap will
292 * fail.
293 * If the user desires to bind to a specific mac + sap, it is possible to 279 * If the user desires to bind to a specific mac + sap, it is possible to
294 * have multiple sap connections via multiple macs. 280 * have multiple sap connections via multiple macs.
295 * Bind and autobind for that matter must enforce the correct sap usage 281 * Bind and autobind for that matter must enforce the correct sap usage
@@ -305,10 +291,16 @@ static int llc_ui_bind(struct socket *sock, struct sockaddr *uaddr, int addrlen)
305 int rc = -EINVAL; 291 int rc = -EINVAL;
306 292
307 dprintk("%s: binding %02X\n", __FUNCTION__, addr->sllc_sap); 293 dprintk("%s: binding %02X\n", __FUNCTION__, addr->sllc_sap);
308 if (!sock_flag(sk, SOCK_ZAPPED) || addrlen != sizeof(*addr)) 294 if (unlikely(!sock_flag(sk, SOCK_ZAPPED) || addrlen != sizeof(*addr)))
309 goto out; 295 goto out;
310 rc = -EAFNOSUPPORT; 296 rc = -EAFNOSUPPORT;
311 if (addr->sllc_family != AF_LLC) 297 if (unlikely(addr->sllc_family != AF_LLC))
298 goto out;
299 rc = -ENODEV;
300 rtnl_lock();
301 llc->dev = dev_getbyhwaddr(addr->sllc_arphrd, addr->sllc_mac);
302 rtnl_unlock();
303 if (!llc->dev)
312 goto out; 304 goto out;
313 if (!addr->sllc_sap) { 305 if (!addr->sllc_sap) {
314 rc = -EUSERS; 306 rc = -EUSERS;
@@ -322,6 +314,7 @@ static int llc_ui_bind(struct socket *sock, struct sockaddr *uaddr, int addrlen)
322 rc = -EBUSY; /* some other network layer is using the sap */ 314 rc = -EBUSY; /* some other network layer is using the sap */
323 if (!sap) 315 if (!sap)
324 goto out; 316 goto out;
317 llc_sap_hold(sap);
325 } else { 318 } else {
326 struct llc_addr laddr, daddr; 319 struct llc_addr laddr, daddr;
327 struct sock *ask; 320 struct sock *ask;
@@ -338,7 +331,7 @@ static int llc_ui_bind(struct socket *sock, struct sockaddr *uaddr, int addrlen)
338 ask = llc_lookup_established(sap, &daddr, &laddr); 331 ask = llc_lookup_established(sap, &daddr, &laddr);
339 if (ask) { 332 if (ask) {
340 sock_put(ask); 333 sock_put(ask);
341 goto out; 334 goto out_put;
342 } 335 }
343 } 336 }
344 llc->laddr.lsap = addr->sllc_sap; 337 llc->laddr.lsap = addr->sllc_sap;
@@ -348,6 +341,8 @@ static int llc_ui_bind(struct socket *sock, struct sockaddr *uaddr, int addrlen)
348 llc_sap_add_socket(sap, sk); 341 llc_sap_add_socket(sap, sk);
349 sock_reset_flag(sk, SOCK_ZAPPED); 342 sock_reset_flag(sk, SOCK_ZAPPED);
350 rc = 0; 343 rc = 0;
344out_put:
345 llc_sap_put(sap);
351out: 346out:
352 return rc; 347 return rc;
353} 348}
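
The bind path above now resolves the output device from the caller-supplied MAC (dev_getbyhwaddr() under rtnl_lock()) and pins the SAP with llc_sap_hold()/llc_sap_put() instead of leaving SAP teardown to llc_ui_release(). As a hedged illustration of how this path is reached from user space, here is a minimal sketch of binding a PF_LLC socket to a local MAC + SAP; the interface name "eth0" and SAP 0x42 are placeholders, and the sockaddr_llc field names are those exported by <linux/llc.h>.

/*
 * Hedged user-space sketch, not part of the patch: bind a PF_LLC socket
 * to the MAC of "eth0" plus an example SAP, exercising llc_ui_bind().
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <linux/llc.h>

int main(void)
{
	struct sockaddr_llc laddr;
	struct ifreq ifr;
	int fd = socket(PF_LLC, SOCK_STREAM, 0);

	if (fd < 0) {
		perror("socket(PF_LLC)");
		return 1;
	}
	/* fetch the hardware address of the interface we want to bind to */
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	if (ioctl(fd, SIOCGIFHWADDR, &ifr) < 0) {
		perror("SIOCGIFHWADDR");
		return 1;
	}
	memset(&laddr, 0, sizeof(laddr));
	laddr.sllc_family = AF_LLC;
	laddr.sllc_arphrd = ARPHRD_ETHER;
	laddr.sllc_sap    = 0x42;	/* arbitrary example SAP */
	memcpy(laddr.sllc_mac, ifr.ifr_hwaddr.sa_data, IFHWADDRLEN);

	/* with the patched kernel this resolves llc->dev via dev_getbyhwaddr() */
	if (bind(fd, (struct sockaddr *)&laddr, sizeof(laddr)) < 0) {
		perror("bind");
		return 1;
	}
	close(fd);
	return 0;
}
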
@@ -369,7 +364,7 @@ static int llc_ui_shutdown(struct socket *sock, int how)
369 int rc = -ENOTCONN; 364 int rc = -ENOTCONN;
370 365
371 lock_sock(sk); 366 lock_sock(sk);
372 if (sk->sk_state != TCP_ESTABLISHED) 367 if (unlikely(sk->sk_state != TCP_ESTABLISHED))
373 goto out; 368 goto out;
374 rc = -EINVAL; 369 rc = -EINVAL;
375 if (how != 2) 370 if (how != 2)
@@ -404,14 +399,18 @@ static int llc_ui_connect(struct socket *sock, struct sockaddr *uaddr,
404 struct sock *sk = sock->sk; 399 struct sock *sk = sock->sk;
405 struct llc_sock *llc = llc_sk(sk); 400 struct llc_sock *llc = llc_sk(sk);
406 struct sockaddr_llc *addr = (struct sockaddr_llc *)uaddr; 401 struct sockaddr_llc *addr = (struct sockaddr_llc *)uaddr;
407 struct net_device *dev;
408 int rc = -EINVAL; 402 int rc = -EINVAL;
409 403
410 lock_sock(sk); 404 lock_sock(sk);
411 if (addrlen != sizeof(*addr)) 405 if (unlikely(addrlen != sizeof(*addr)))
412 goto out; 406 goto out;
413 rc = -EAFNOSUPPORT; 407 rc = -EAFNOSUPPORT;
414 if (addr->sllc_family != AF_LLC) 408 if (unlikely(addr->sllc_family != AF_LLC))
409 goto out;
410 if (unlikely(sk->sk_type != SOCK_STREAM))
411 goto out;
412 rc = -EALREADY;
413 if (unlikely(sock->state == SS_CONNECTING))
415 goto out; 414 goto out;
416 /* bind connection to sap if user hasn't done it. */ 415 /* bind connection to sap if user hasn't done it. */
417 if (sock_flag(sk, SOCK_ZAPPED)) { 416 if (sock_flag(sk, SOCK_ZAPPED)) {
@@ -419,19 +418,13 @@ static int llc_ui_connect(struct socket *sock, struct sockaddr *uaddr,
419 rc = llc_ui_autobind(sock, addr); 418 rc = llc_ui_autobind(sock, addr);
420 if (rc) 419 if (rc)
421 goto out; 420 goto out;
422 llc->daddr.lsap = addr->sllc_sap;
423 memcpy(llc->daddr.mac, addr->sllc_mac, IFHWADDRLEN);
424 } 421 }
425 dev = llc->dev; 422 llc->daddr.lsap = addr->sllc_sap;
426 if (sk->sk_type != SOCK_STREAM) 423 memcpy(llc->daddr.mac, addr->sllc_mac, IFHWADDRLEN);
427 goto out;
428 rc = -EALREADY;
429 if (sock->state == SS_CONNECTING)
430 goto out;
431 sock->state = SS_CONNECTING; 424 sock->state = SS_CONNECTING;
432 sk->sk_state = TCP_SYN_SENT; 425 sk->sk_state = TCP_SYN_SENT;
433 llc->link = llc_ui_next_link_no(llc->sap->laddr.lsap); 426 llc->link = llc_ui_next_link_no(llc->sap->laddr.lsap);
434 rc = llc_establish_connection(sk, dev->dev_addr, 427 rc = llc_establish_connection(sk, llc->dev->dev_addr,
435 addr->sllc_mac, addr->sllc_sap); 428 addr->sllc_mac, addr->sllc_sap);
436 if (rc) { 429 if (rc) {
437 dprintk("%s: llc_ui_send_conn failed :-(\n", __FUNCTION__); 430 dprintk("%s: llc_ui_send_conn failed :-(\n", __FUNCTION__);
@@ -439,12 +432,30 @@ static int llc_ui_connect(struct socket *sock, struct sockaddr *uaddr,
439 sk->sk_state = TCP_CLOSE; 432 sk->sk_state = TCP_CLOSE;
440 goto out; 433 goto out;
441 } 434 }
442 rc = llc_ui_wait_for_conn(sk, sk->sk_rcvtimeo); 435
443 if (rc) 436 if (sk->sk_state == TCP_SYN_SENT) {
444 dprintk("%s: llc_ui_wait_for_conn failed=%d\n", __FUNCTION__, rc); 437 const long timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
438
439 if (!timeo || !llc_ui_wait_for_conn(sk, timeo))
440 goto out;
441
442 rc = sock_intr_errno(timeo);
443 if (signal_pending(current))
444 goto out;
445 }
446
447 if (sk->sk_state == TCP_CLOSE)
448 goto sock_error;
449
450 sock->state = SS_CONNECTED;
451 rc = 0;
445out: 452out:
446 release_sock(sk); 453 release_sock(sk);
447 return rc; 454 return rc;
455sock_error:
456 rc = sock_error(sk) ? : -ECONNABORTED;
457 sock->state = SS_UNCONNECTED;
458 goto out;
448} 459}
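
With the reordered checks above, connect() on a PF_LLC socket fails early for the wrong socket type or a connect already in flight, and the time it may block now comes from the socket's send timeout via sock_sndtimeo(). A hedged user-space sketch, continuing from the bind example (same headers plus <sys/time.h>, same fd); the peer MAC and SAP are placeholders.

/*
 * Hedged sketch, not from the patch: SO_SNDTIMEO bounds how long
 * connect() may sleep in llc_ui_wait_for_conn(); 0 means wait forever.
 */
static int llc_connect_example(int fd)
{
	struct timeval tv = { .tv_sec = 5, .tv_usec = 0 };
	struct sockaddr_llc daddr;

	memset(&daddr, 0, sizeof(daddr));
	daddr.sllc_family = AF_LLC;
	daddr.sllc_arphrd = ARPHRD_ETHER;
	daddr.sllc_sap    = 0x42;				/* example remote SAP */
	memcpy(daddr.sllc_mac,
	       "\x00\x11\x22\x33\x44\x55", IFHWADDRLEN);	/* example peer MAC */

	setsockopt(fd, SOL_SOCKET, SO_SNDTIMEO, &tv, sizeof(tv));
	return connect(fd, (struct sockaddr *)&daddr, sizeof(daddr));
}
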
449 460
450/** 461/**
@@ -461,10 +472,10 @@ static int llc_ui_listen(struct socket *sock, int backlog)
461 int rc = -EINVAL; 472 int rc = -EINVAL;
462 473
463 lock_sock(sk); 474 lock_sock(sk);
464 if (sock->state != SS_UNCONNECTED) 475 if (unlikely(sock->state != SS_UNCONNECTED))
465 goto out; 476 goto out;
466 rc = -EOPNOTSUPP; 477 rc = -EOPNOTSUPP;
467 if (sk->sk_type != SOCK_STREAM) 478 if (unlikely(sk->sk_type != SOCK_STREAM))
468 goto out; 479 goto out;
469 rc = -EAGAIN; 480 rc = -EAGAIN;
470 if (sock_flag(sk, SOCK_ZAPPED)) 481 if (sock_flag(sk, SOCK_ZAPPED))
@@ -483,20 +494,14 @@ out:
483 return rc; 494 return rc;
484} 495}
485 496
486static int llc_ui_wait_for_disc(struct sock *sk, int timeout) 497static int llc_ui_wait_for_disc(struct sock *sk, long timeout)
487{ 498{
488 DECLARE_WAITQUEUE(wait, current); 499 DEFINE_WAIT(wait);
489 int rc; 500 int rc = 0;
490 501
491 add_wait_queue_exclusive(sk->sk_sleep, &wait); 502 while (1) {
492 for (;;) { 503 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
493 __set_current_state(TASK_INTERRUPTIBLE); 504 if (sk_wait_event(sk, &timeout, sk->sk_state == TCP_CLOSE))
494 rc = 0;
495 if (sk->sk_state != TCP_CLOSE) {
496 release_sock(sk);
497 timeout = schedule_timeout(timeout);
498 lock_sock(sk);
499 } else
500 break; 505 break;
501 rc = -ERESTARTSYS; 506 rc = -ERESTARTSYS;
502 if (signal_pending(current)) 507 if (signal_pending(current))
@@ -504,65 +509,40 @@ static int llc_ui_wait_for_disc(struct sock *sk, int timeout)
504 rc = -EAGAIN; 509 rc = -EAGAIN;
505 if (!timeout) 510 if (!timeout)
506 break; 511 break;
512 rc = 0;
507 } 513 }
508 __set_current_state(TASK_RUNNING); 514 finish_wait(sk->sk_sleep, &wait);
509 remove_wait_queue(sk->sk_sleep, &wait);
510 return rc; 515 return rc;
511} 516}
512 517
513static int llc_ui_wait_for_conn(struct sock *sk, int timeout) 518static int llc_ui_wait_for_conn(struct sock *sk, long timeout)
514{ 519{
515 DECLARE_WAITQUEUE(wait, current); 520 DEFINE_WAIT(wait);
516 int rc;
517 521
518 add_wait_queue_exclusive(sk->sk_sleep, &wait); 522 while (1) {
519 for (;;) { 523 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
520 __set_current_state(TASK_INTERRUPTIBLE); 524 if (sk_wait_event(sk, &timeout, sk->sk_state != TCP_SYN_SENT))
521 rc = -EAGAIN;
522 if (sk->sk_state == TCP_CLOSE)
523 break;
524 rc = 0;
525 if (sk->sk_state != TCP_ESTABLISHED) {
526 release_sock(sk);
527 timeout = schedule_timeout(timeout);
528 lock_sock(sk);
529 } else
530 break; 525 break;
531 rc = -ERESTARTSYS; 526 if (signal_pending(current) || !timeout)
532 if (signal_pending(current))
533 break;
534 rc = -EAGAIN;
535 if (!timeout)
536 break; 527 break;
537 } 528 }
538 __set_current_state(TASK_RUNNING); 529 finish_wait(sk->sk_sleep, &wait);
539 remove_wait_queue(sk->sk_sleep, &wait); 530 return timeout;
540 return rc;
541} 531}
542 532
543static int llc_ui_wait_for_data(struct sock *sk, int timeout) 533static int llc_ui_wait_for_busy_core(struct sock *sk, long timeout)
544{ 534{
545 DECLARE_WAITQUEUE(wait, current); 535 DEFINE_WAIT(wait);
546 int rc = 0; 536 struct llc_sock *llc = llc_sk(sk);
537 int rc;
547 538
548 add_wait_queue_exclusive(sk->sk_sleep, &wait); 539 while (1) {
549 for (;;) { 540 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
550 __set_current_state(TASK_INTERRUPTIBLE);
551 if (sk->sk_shutdown & RCV_SHUTDOWN)
552 break;
553 /*
554 * Well, if we have backlog, try to process it now.
555 */
556 if (sk->sk_backlog.tail) {
557 release_sock(sk);
558 lock_sock(sk);
559 }
560 rc = 0; 541 rc = 0;
561 if (skb_queue_empty(&sk->sk_receive_queue)) { 542 if (sk_wait_event(sk, &timeout,
562 release_sock(sk); 543 (sk->sk_shutdown & RCV_SHUTDOWN) ||
563 timeout = schedule_timeout(timeout); 544 (!llc_data_accept_state(llc->state) &&
564 lock_sock(sk); 545 !llc->p_flag)))
565 } else
566 break; 546 break;
567 rc = -ERESTARTSYS; 547 rc = -ERESTARTSYS;
568 if (signal_pending(current)) 548 if (signal_pending(current))
@@ -571,40 +551,35 @@ static int llc_ui_wait_for_data(struct sock *sk, int timeout)
571 if (!timeout) 551 if (!timeout)
572 break; 552 break;
573 } 553 }
574 __set_current_state(TASK_RUNNING); 554 finish_wait(sk->sk_sleep, &wait);
575 remove_wait_queue(sk->sk_sleep, &wait);
576 return rc; 555 return rc;
577} 556}
578 557
579static int llc_ui_wait_for_busy_core(struct sock *sk, int timeout) 558static int llc_wait_data(struct sock *sk, long timeo)
580{ 559{
581 DECLARE_WAITQUEUE(wait, current);
582 struct llc_sock *llc = llc_sk(sk);
583 int rc; 560 int rc;
584 561
585 add_wait_queue_exclusive(sk->sk_sleep, &wait); 562 while (1) {
586 for (;;) { 563 /*
587 dprintk("%s: looping...\n", __FUNCTION__); 564 * POSIX 1003.1g mandates this order.
588 __set_current_state(TASK_INTERRUPTIBLE); 565 */
589 rc = -ENOTCONN; 566 if (sk->sk_err) {
590 if (sk->sk_shutdown & RCV_SHUTDOWN) 567 rc = sock_error(sk);
591 break; 568 break;
569 }
592 rc = 0; 570 rc = 0;
593 if (llc_data_accept_state(llc->state) || llc->p_flag) { 571 if (sk->sk_shutdown & RCV_SHUTDOWN)
594 release_sock(sk);
595 timeout = schedule_timeout(timeout);
596 lock_sock(sk);
597 } else
598 break; 572 break;
599 rc = -ERESTARTSYS; 573 rc = -EAGAIN;
574 if (!timeo)
575 break;
576 rc = sock_intr_errno(timeo);
600 if (signal_pending(current)) 577 if (signal_pending(current))
601 break; 578 break;
602 rc = -EAGAIN; 579 rc = 0;
603 if (!timeout) 580 if (sk_wait_data(sk, &timeo))
604 break; 581 break;
605 } 582 }
606 __set_current_state(TASK_RUNNING);
607 remove_wait_queue(sk->sk_sleep, &wait);
608 return rc; 583 return rc;
609} 584}
610 585
@@ -627,15 +602,18 @@ static int llc_ui_accept(struct socket *sock, struct socket *newsock, int flags)
627 dprintk("%s: accepting on %02X\n", __FUNCTION__, 602 dprintk("%s: accepting on %02X\n", __FUNCTION__,
628 llc_sk(sk)->laddr.lsap); 603 llc_sk(sk)->laddr.lsap);
629 lock_sock(sk); 604 lock_sock(sk);
630 if (sk->sk_type != SOCK_STREAM) 605 if (unlikely(sk->sk_type != SOCK_STREAM))
631 goto out; 606 goto out;
632 rc = -EINVAL; 607 rc = -EINVAL;
633 if (sock->state != SS_UNCONNECTED || sk->sk_state != TCP_LISTEN) 608 if (unlikely(sock->state != SS_UNCONNECTED ||
609 sk->sk_state != TCP_LISTEN))
634 goto out; 610 goto out;
635 /* wait for a connection to arrive. */ 611 /* wait for a connection to arrive. */
636 rc = llc_ui_wait_for_data(sk, sk->sk_rcvtimeo); 612 if (skb_queue_empty(&sk->sk_receive_queue)) {
637 if (rc) 613 rc = llc_wait_data(sk, sk->sk_rcvtimeo);
638 goto out; 614 if (rc)
615 goto out;
616 }
639 dprintk("%s: got a new connection on %02X\n", __FUNCTION__, 617 dprintk("%s: got a new connection on %02X\n", __FUNCTION__,
640 llc_sk(sk)->laddr.lsap); 618 llc_sk(sk)->laddr.lsap);
641 skb = skb_dequeue(&sk->sk_receive_queue); 619 skb = skb_dequeue(&sk->sk_receive_queue);
@@ -657,7 +635,6 @@ static int llc_ui_accept(struct socket *sock, struct socket *newsock, int flags)
657 /* put original socket back into a clean listen state. */ 635 /* put original socket back into a clean listen state. */
658 sk->sk_state = TCP_LISTEN; 636 sk->sk_state = TCP_LISTEN;
659 sk->sk_ack_backlog--; 637 sk->sk_ack_backlog--;
660 skb->sk = NULL;
661 dprintk("%s: ok success on %02X, client on %02X\n", __FUNCTION__, 638 dprintk("%s: ok success on %02X, client on %02X\n", __FUNCTION__,
662 llc_sk(sk)->addr.sllc_sap, newllc->daddr.lsap); 639 llc_sk(sk)->addr.sllc_sap, newllc->daddr.lsap);
663frees: 640frees:
@@ -671,56 +648,167 @@ out:
671 * llc_ui_recvmsg - copy received data to the socket user. 648 * llc_ui_recvmsg - copy received data to the socket user.
672 * @sock: Socket to copy data from. 649 * @sock: Socket to copy data from.
673 * @msg: Various user space related information. 650 * @msg: Various user space related information.
674 * @size: Size of user buffer. 651 * @len: Size of user buffer.
675 * @flags: User specified flags. 652 * @flags: User specified flags.
676 * 653 *
677 * Copy received data to the socket user. 654 * Copy received data to the socket user.
678 * Returns non-negative upon success, negative otherwise. 655 * Returns non-negative upon success, negative otherwise.
679 */ 656 */
680static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock, 657static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock,
681 struct msghdr *msg, size_t size, int flags) 658 struct msghdr *msg, size_t len, int flags)
682{ 659{
683 struct sock *sk = sock->sk;
684 struct sockaddr_llc *uaddr = (struct sockaddr_llc *)msg->msg_name; 660 struct sockaddr_llc *uaddr = (struct sockaddr_llc *)msg->msg_name;
685 struct sk_buff *skb; 661 const int nonblock = flags & MSG_DONTWAIT;
662 struct sk_buff *skb = NULL;
663 struct sock *sk = sock->sk;
664 struct llc_sock *llc = llc_sk(sk);
686 size_t copied = 0; 665 size_t copied = 0;
687 int rc = -ENOMEM, timeout; 666 u32 peek_seq = 0;
688 int noblock = flags & MSG_DONTWAIT; 667 u32 *seq;
668 unsigned long used;
669 int target; /* Read at least this many bytes */
670 long timeo;
689 671
690 dprintk("%s: receiving in %02X from %02X\n", __FUNCTION__,
691 llc_sk(sk)->laddr.lsap, llc_sk(sk)->daddr.lsap);
692 lock_sock(sk); 672 lock_sock(sk);
693 timeout = sock_rcvtimeo(sk, noblock); 673 copied = -ENOTCONN;
694 rc = llc_ui_wait_for_data(sk, timeout); 674 if (sk->sk_state == TCP_LISTEN)
695 if (rc) {
696 dprintk("%s: llc_ui_wait_for_data failed recv "
697 "in %02X from %02X\n", __FUNCTION__,
698 llc_sk(sk)->laddr.lsap, llc_sk(sk)->daddr.lsap);
699 goto out; 675 goto out;
700 } 676
701 skb = skb_dequeue(&sk->sk_receive_queue); 677 timeo = sock_rcvtimeo(sk, nonblock);
702 if (!skb) /* shutdown */ 678
703 goto out; 679 seq = &llc->copied_seq;
704 copied = skb->len; 680 if (flags & MSG_PEEK) {
705 if (copied > size) 681 peek_seq = llc->copied_seq;
706 copied = size; 682 seq = &peek_seq;
707 rc = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); 683 }
708 if (rc) 684
709 goto dgram_free; 685 target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
710 if (skb->len > copied) { 686 copied = 0;
711 skb_pull(skb, copied); 687
712 skb_queue_head(&sk->sk_receive_queue, skb); 688 do {
713 } 689 u32 offset;
714 if (uaddr) 690
715 memcpy(uaddr, llc_ui_skb_cb(skb), sizeof(*uaddr)); 691 /*
716 msg->msg_namelen = sizeof(*uaddr); 692 * We need to check signals first, to get correct SIGURG
717 if (!skb->next) { 693 * handling. FIXME: Need to check this doesn't impact 1003.1g
718dgram_free: 694 * and move it down to the bottom of the loop
719 kfree_skb(skb); 695 */
720 } 696 if (signal_pending(current)) {
697 if (copied)
698 break;
699 copied = timeo ? sock_intr_errno(timeo) : -EAGAIN;
700 break;
701 }
702
703 /* Next get a buffer. */
704
705 skb = skb_peek(&sk->sk_receive_queue);
706 if (skb) {
707 offset = *seq;
708 goto found_ok_skb;
709 }
 710 /* Well, if we have backlog, try to process it now. */
711
712 if (copied >= target && !sk->sk_backlog.tail)
713 break;
714
715 if (copied) {
716 if (sk->sk_err ||
717 sk->sk_state == TCP_CLOSE ||
718 (sk->sk_shutdown & RCV_SHUTDOWN) ||
719 !timeo ||
720 (flags & MSG_PEEK))
721 break;
722 } else {
723 if (sock_flag(sk, SOCK_DONE))
724 break;
725
726 if (sk->sk_err) {
727 copied = sock_error(sk);
728 break;
729 }
730 if (sk->sk_shutdown & RCV_SHUTDOWN)
731 break;
732
733 if (sk->sk_state == TCP_CLOSE) {
734 if (!sock_flag(sk, SOCK_DONE)) {
735 /*
 736 * This occurs when the user tries to
 737 * read from a never connected socket.
738 */
739 copied = -ENOTCONN;
740 break;
741 }
742 break;
743 }
744 if (!timeo) {
745 copied = -EAGAIN;
746 break;
747 }
748 }
749
750 if (copied >= target) { /* Do not sleep, just process backlog. */
751 release_sock(sk);
752 lock_sock(sk);
753 } else
754 sk_wait_data(sk, &timeo);
755
756 if ((flags & MSG_PEEK) && peek_seq != llc->copied_seq) {
757 if (net_ratelimit())
758 printk(KERN_DEBUG "LLC(%s:%d): Application "
759 "bug, race in MSG_PEEK.\n",
760 current->comm, current->pid);
761 peek_seq = llc->copied_seq;
762 }
763 continue;
764 found_ok_skb:
765 /* Ok so how much can we use? */
766 used = skb->len - offset;
767 if (len < used)
768 used = len;
769
770 if (!(flags & MSG_TRUNC)) {
771 int rc = skb_copy_datagram_iovec(skb, offset,
772 msg->msg_iov, used);
773 if (rc) {
774 /* Exception. Bailout! */
775 if (!copied)
776 copied = -EFAULT;
777 break;
778 }
779 }
780
781 *seq += used;
782 copied += used;
783 len -= used;
784
785 if (used + offset < skb->len)
786 continue;
787
788 if (!(flags & MSG_PEEK)) {
789 sk_eat_skb(sk, skb);
790 *seq = 0;
791 }
792 } while (len > 0);
793
794 /*
795 * According to UNIX98, msg_name/msg_namelen are ignored
796 * on connected socket. -ANK
797 * But... af_llc still doesn't have separate sets of methods for
798 * SOCK_DGRAM and SOCK_STREAM :-( So we have to do this test, will
799 * eventually fix this tho :-) -acme
800 */
801 if (sk->sk_type == SOCK_DGRAM)
802 goto copy_uaddr;
721out: 803out:
722 release_sock(sk); 804 release_sock(sk);
723 return rc ? : copied; 805 return copied;
806copy_uaddr:
807 if (uaddr != NULL && skb != NULL) {
808 memcpy(uaddr, llc_ui_skb_cb(skb), sizeof(*uaddr));
809 msg->msg_namelen = sizeof(*uaddr);
810 }
811 goto out;
724} 812}
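
The rewritten receive path above turns SOCK_STREAM reception into a tcp_recvmsg()-style loop: it honours MSG_PEEK, SO_RCVLOWAT (through sock_rcvlowat()) and partial consumption of a PDU via llc->copied_seq, rather than handing back at most one queued skb per call. A hedged user-space sketch of the visible difference, where fd is a connected SOCK_STREAM PF_LLC socket as set up above:

char peek[16], buf[256];
ssize_t n;

n = recv(fd, peek, sizeof(peek), MSG_PEEK);	/* leaves the data queued */
if (n > 0)
	n = recv(fd, buf, sizeof(buf), 0);	/* a short read now keeps the
						 * rest of the PDU queued */
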
725 813
726/** 814/**
@@ -740,7 +828,6 @@ static int llc_ui_sendmsg(struct kiocb *iocb, struct socket *sock,
740 struct sockaddr_llc *addr = (struct sockaddr_llc *)msg->msg_name; 828 struct sockaddr_llc *addr = (struct sockaddr_llc *)msg->msg_name;
741 int flags = msg->msg_flags; 829 int flags = msg->msg_flags;
742 int noblock = flags & MSG_DONTWAIT; 830 int noblock = flags & MSG_DONTWAIT;
743 struct net_device *dev;
744 struct sk_buff *skb; 831 struct sk_buff *skb;
745 size_t size = 0; 832 size_t size = 0;
746 int rc = -EINVAL, copied = 0, hdrlen; 833 int rc = -EINVAL, copied = 0, hdrlen;
@@ -763,19 +850,17 @@ static int llc_ui_sendmsg(struct kiocb *iocb, struct socket *sock,
763 if (rc) 850 if (rc)
764 goto release; 851 goto release;
765 } 852 }
766 dev = llc->dev; 853 hdrlen = llc->dev->hard_header_len + llc_ui_header_len(sk, addr);
767 hdrlen = dev->hard_header_len + llc_ui_header_len(sk, addr);
768 size = hdrlen + len; 854 size = hdrlen + len;
769 if (size > dev->mtu) 855 if (size > llc->dev->mtu)
770 size = dev->mtu; 856 size = llc->dev->mtu;
771 copied = size - hdrlen; 857 copied = size - hdrlen;
772 release_sock(sk); 858 release_sock(sk);
773 skb = sock_alloc_send_skb(sk, size, noblock, &rc); 859 skb = sock_alloc_send_skb(sk, size, noblock, &rc);
774 lock_sock(sk); 860 lock_sock(sk);
775 if (!skb) 861 if (!skb)
776 goto release; 862 goto release;
777 skb->sk = sk; 863 skb->dev = llc->dev;
778 skb->dev = dev;
779 skb->protocol = llc_proto_type(addr->sllc_arphrd); 864 skb->protocol = llc_proto_type(addr->sllc_arphrd);
780 skb_reserve(skb, hdrlen); 865 skb_reserve(skb, hdrlen);
781 rc = memcpy_fromiovec(skb_put(skb, copied), msg->msg_iov, copied); 866 rc = memcpy_fromiovec(skb_put(skb, copied), msg->msg_iov, copied);
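
The send path keeps its rule that one call produces at most one frame: the payload copied from user space is the requested length clamped so that the MAC + LLC headers plus data fit the device MTU, now read from llc->dev instead of a local dev variable. A small stand-alone sketch of that arithmetic, with illustrative numbers (1500-byte Ethernet MTU, 14-byte MAC header, 3-byte LLC UI header):

/* Hedged sketch of the clamp performed in llc_ui_sendmsg() above. */
#include <stdio.h>

static size_t llc_payload(size_t mtu, size_t hdrlen, size_t len)
{
	size_t size = hdrlen + len;

	if (size > mtu)
		size = mtu;
	return size - hdrlen;		/* == "copied" in llc_ui_sendmsg() */
}

int main(void)
{
	/* 1500-byte MTU, 14 + 3 bytes of headers, 4096 bytes requested */
	printf("%zu\n", llc_payload(1500, 17, 4096));	/* prints 1483 */
	return 0;
}
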
@@ -800,15 +885,13 @@ static int llc_ui_sendmsg(struct kiocb *iocb, struct socket *sock,
800 if (!(sk->sk_type == SOCK_STREAM && !addr->sllc_ua)) 885 if (!(sk->sk_type == SOCK_STREAM && !addr->sllc_ua))
801 goto out; 886 goto out;
802 rc = llc_ui_send_data(sk, skb, noblock); 887 rc = llc_ui_send_data(sk, skb, noblock);
803 if (rc)
804 dprintk("%s: llc_ui_send_data failed: %d\n", __FUNCTION__, rc);
805out: 888out:
806 if (rc) 889 if (rc) {
807 kfree_skb(skb); 890 kfree_skb(skb);
808release: 891release:
809 if (rc)
810 dprintk("%s: failed sending from %02X to %02X: %d\n", 892 dprintk("%s: failed sending from %02X to %02X: %d\n",
811 __FUNCTION__, llc->laddr.lsap, llc->daddr.lsap, rc); 893 __FUNCTION__, llc->laddr.lsap, llc->daddr.lsap, rc);
894 }
812 release_sock(sk); 895 release_sock(sk);
813 return rc ? : copied; 896 return rc ? : copied;
814} 897}
@@ -895,7 +978,7 @@ static int llc_ui_setsockopt(struct socket *sock, int level, int optname,
895 int rc = -EINVAL, opt; 978 int rc = -EINVAL, opt;
896 979
897 lock_sock(sk); 980 lock_sock(sk);
898 if (level != SOL_LLC || optlen != sizeof(int)) 981 if (unlikely(level != SOL_LLC || optlen != sizeof(int)))
899 goto out; 982 goto out;
900 rc = get_user(opt, (int __user *)optval); 983 rc = get_user(opt, (int __user *)optval);
901 if (rc) 984 if (rc)
@@ -915,22 +998,22 @@ static int llc_ui_setsockopt(struct socket *sock, int level, int optname,
915 case LLC_OPT_ACK_TMR_EXP: 998 case LLC_OPT_ACK_TMR_EXP:
916 if (opt > LLC_OPT_MAX_ACK_TMR_EXP) 999 if (opt > LLC_OPT_MAX_ACK_TMR_EXP)
917 goto out; 1000 goto out;
918 llc->ack_timer.expire = opt; 1001 llc->ack_timer.expire = opt * HZ;
919 break; 1002 break;
920 case LLC_OPT_P_TMR_EXP: 1003 case LLC_OPT_P_TMR_EXP:
921 if (opt > LLC_OPT_MAX_P_TMR_EXP) 1004 if (opt > LLC_OPT_MAX_P_TMR_EXP)
922 goto out; 1005 goto out;
923 llc->pf_cycle_timer.expire = opt; 1006 llc->pf_cycle_timer.expire = opt * HZ;
924 break; 1007 break;
925 case LLC_OPT_REJ_TMR_EXP: 1008 case LLC_OPT_REJ_TMR_EXP:
926 if (opt > LLC_OPT_MAX_REJ_TMR_EXP) 1009 if (opt > LLC_OPT_MAX_REJ_TMR_EXP)
927 goto out; 1010 goto out;
928 llc->rej_sent_timer.expire = opt; 1011 llc->rej_sent_timer.expire = opt * HZ;
929 break; 1012 break;
930 case LLC_OPT_BUSY_TMR_EXP: 1013 case LLC_OPT_BUSY_TMR_EXP:
931 if (opt > LLC_OPT_MAX_BUSY_TMR_EXP) 1014 if (opt > LLC_OPT_MAX_BUSY_TMR_EXP)
932 goto out; 1015 goto out;
933 llc->busy_state_timer.expire = opt; 1016 llc->busy_state_timer.expire = opt * HZ;
934 break; 1017 break;
935 case LLC_OPT_TX_WIN: 1018 case LLC_OPT_TX_WIN:
936 if (opt > LLC_OPT_MAX_WIN) 1019 if (opt > LLC_OPT_MAX_WIN)
@@ -970,7 +1053,7 @@ static int llc_ui_getsockopt(struct socket *sock, int level, int optname,
970 int val = 0, len = 0, rc = -EINVAL; 1053 int val = 0, len = 0, rc = -EINVAL;
971 1054
972 lock_sock(sk); 1055 lock_sock(sk);
973 if (level != SOL_LLC) 1056 if (unlikely(level != SOL_LLC))
974 goto out; 1057 goto out;
975 rc = get_user(len, optlen); 1058 rc = get_user(len, optlen);
976 if (rc) 1059 if (rc)
@@ -980,17 +1063,17 @@ static int llc_ui_getsockopt(struct socket *sock, int level, int optname,
980 goto out; 1063 goto out;
981 switch (optname) { 1064 switch (optname) {
982 case LLC_OPT_RETRY: 1065 case LLC_OPT_RETRY:
983 val = llc->n2; break; 1066 val = llc->n2; break;
984 case LLC_OPT_SIZE: 1067 case LLC_OPT_SIZE:
985 val = llc->n1; break; 1068 val = llc->n1; break;
986 case LLC_OPT_ACK_TMR_EXP: 1069 case LLC_OPT_ACK_TMR_EXP:
987 val = llc->ack_timer.expire; break; 1070 val = llc->ack_timer.expire / HZ; break;
988 case LLC_OPT_P_TMR_EXP: 1071 case LLC_OPT_P_TMR_EXP:
989 val = llc->pf_cycle_timer.expire; break; 1072 val = llc->pf_cycle_timer.expire / HZ; break;
990 case LLC_OPT_REJ_TMR_EXP: 1073 case LLC_OPT_REJ_TMR_EXP:
991 val = llc->rej_sent_timer.expire; break; 1074 val = llc->rej_sent_timer.expire / HZ; break;
992 case LLC_OPT_BUSY_TMR_EXP: 1075 case LLC_OPT_BUSY_TMR_EXP:
993 val = llc->busy_state_timer.expire; break; 1076 val = llc->busy_state_timer.expire / HZ; break;
994 case LLC_OPT_TX_WIN: 1077 case LLC_OPT_TX_WIN:
995 val = llc->k; break; 1078 val = llc->k; break;
996 case LLC_OPT_RX_WIN: 1079 case LLC_OPT_RX_WIN:
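
The four timer options keep seconds as their user-visible unit; the conversion to jiffies now happens once in setsockopt() (and back in getsockopt()), so the connection actions no longer multiply by HZ at every mod_timer(). A hedged user-space sketch follows; LLC_OPT_ACK_TMR_EXP comes from <linux/llc.h>, and SOL_LLC is 268 in the kernel's include/linux/socket.h, so a local fallback define may be needed.

#ifndef SOL_LLC
#define SOL_LLC 268	/* value from the kernel's include/linux/socket.h */
#endif

int secs = 2;
socklen_t optlen = sizeof(secs);

if (setsockopt(fd, SOL_LLC, LLC_OPT_ACK_TMR_EXP, &secs, sizeof(secs)) < 0)
	perror("setsockopt");
if (getsockopt(fd, SOL_LLC, LLC_OPT_ACK_TMR_EXP, &secs, &optlen) == 0)
	printf("ack timer: %d s\n", secs);	/* still reported in seconds */
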
@@ -1034,8 +1117,12 @@ static struct proto_ops llc_ui_ops = {
1034 .sendpage = sock_no_sendpage, 1117 .sendpage = sock_no_sendpage,
1035}; 1118};
1036 1119
1037extern void llc_sap_handler(struct llc_sap *sap, struct sk_buff *skb); 1120static char llc_proc_err_msg[] __initdata =
1038extern void llc_conn_handler(struct llc_sap *sap, struct sk_buff *skb); 1121 KERN_CRIT "LLC: Unable to register the proc_fs entries\n";
1122static char llc_sysctl_err_msg[] __initdata =
1123 KERN_CRIT "LLC: Unable to register the sysctl entries\n";
1124static char llc_sock_err_msg[] __initdata =
1125 KERN_CRIT "LLC: Unable to register the network family\n";
1039 1126
1040static int __init llc2_init(void) 1127static int __init llc2_init(void)
1041{ 1128{
@@ -1048,13 +1135,28 @@ static int __init llc2_init(void)
1048 llc_station_init(); 1135 llc_station_init();
1049 llc_ui_sap_last_autoport = LLC_SAP_DYN_START; 1136 llc_ui_sap_last_autoport = LLC_SAP_DYN_START;
1050 rc = llc_proc_init(); 1137 rc = llc_proc_init();
1051 if (rc != 0) 1138 if (rc != 0) {
1139 printk(llc_proc_err_msg);
1052 goto out_unregister_llc_proto; 1140 goto out_unregister_llc_proto;
1053 sock_register(&llc_ui_family_ops); 1141 }
1142 rc = llc_sysctl_init();
1143 if (rc) {
1144 printk(llc_sysctl_err_msg);
1145 goto out_proc;
1146 }
1147 rc = sock_register(&llc_ui_family_ops);
1148 if (rc) {
1149 printk(llc_sock_err_msg);
1150 goto out_sysctl;
1151 }
1054 llc_add_pack(LLC_DEST_SAP, llc_sap_handler); 1152 llc_add_pack(LLC_DEST_SAP, llc_sap_handler);
1055 llc_add_pack(LLC_DEST_CONN, llc_conn_handler); 1153 llc_add_pack(LLC_DEST_CONN, llc_conn_handler);
1056out: 1154out:
1057 return rc; 1155 return rc;
1156out_sysctl:
1157 llc_sysctl_exit();
1158out_proc:
1159 llc_proc_exit();
1058out_unregister_llc_proto: 1160out_unregister_llc_proto:
1059 proto_unregister(&llc_proto); 1161 proto_unregister(&llc_proto);
1060 goto out; 1162 goto out;
@@ -1067,6 +1169,7 @@ static void __exit llc2_exit(void)
1067 llc_remove_pack(LLC_DEST_CONN); 1169 llc_remove_pack(LLC_DEST_CONN);
1068 sock_unregister(PF_LLC); 1170 sock_unregister(PF_LLC);
1069 llc_proc_exit(); 1171 llc_proc_exit();
1172 llc_sysctl_exit();
1070 proto_unregister(&llc_proto); 1173 proto_unregister(&llc_proto);
1071} 1174}
1072 1175
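
llc2_init() above now registers procfs, sysctl and the socket family in that order and unwinds them in reverse when a later step fails, with llc2_exit() mirroring the teardown. A minimal, self-contained sketch of the same goto-unwind idiom; the setup and teardown helpers are stand-ins, not kernel APIs.

#include <stdio.h>

static int setup_a(void) { puts("A up"); return 0; }
static int setup_b(void) { puts("B up"); return 0; }
static int setup_c(void) { puts("C up"); return -1; }	/* simulate a failure */
static void teardown_a(void) { puts("A down"); }
static void teardown_b(void) { puts("B down"); }

static int init_all(void)
{
	int rc = setup_a();

	if (rc)
		goto out;
	rc = setup_b();
	if (rc)
		goto out_a;
	rc = setup_c();
	if (rc)
		goto out_b;
out:
	return rc;
out_b:
	teardown_b();		/* undo steps in reverse order */
out_a:
	teardown_a();
	goto out;
}

int main(void)
{
	return init_all() ? 1 : 0;
}
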
diff --git a/net/llc/llc_c_ac.c b/net/llc/llc_c_ac.c
index b218be4c10ec..b0bcfb1f12dd 100644
--- a/net/llc/llc_c_ac.c
+++ b/net/llc/llc_c_ac.c
@@ -60,23 +60,10 @@ int llc_conn_ac_clear_remote_busy(struct sock *sk, struct sk_buff *skb)
60 60
61int llc_conn_ac_conn_ind(struct sock *sk, struct sk_buff *skb) 61int llc_conn_ac_conn_ind(struct sock *sk, struct sk_buff *skb)
62{ 62{
63 int rc = -ENOTCONN; 63 struct llc_conn_state_ev *ev = llc_conn_ev(skb);
64 u8 dsap;
65 struct llc_sap *sap;
66
67 llc_pdu_decode_dsap(skb, &dsap);
68 sap = llc_sap_find(dsap);
69 if (sap) {
70 struct llc_conn_state_ev *ev = llc_conn_ev(skb);
71 struct llc_sock *llc = llc_sk(sk);
72 64
73 llc_pdu_decode_sa(skb, llc->daddr.mac); 65 ev->ind_prim = LLC_CONN_PRIM;
74 llc_pdu_decode_da(skb, llc->laddr.mac); 66 return 0;
75 llc->dev = skb->dev;
76 ev->ind_prim = LLC_CONN_PRIM;
77 rc = 0;
78 }
79 return rc;
80} 67}
81 68
82int llc_conn_ac_conn_confirm(struct sock *sk, struct sk_buff *skb) 69int llc_conn_ac_conn_confirm(struct sock *sk, struct sk_buff *skb)
@@ -120,10 +107,8 @@ int llc_conn_ac_disc_ind(struct sock *sk, struct sk_buff *skb)
120 reason = LLC_DISC_REASON_RX_DISC_CMD_PDU; 107 reason = LLC_DISC_REASON_RX_DISC_CMD_PDU;
121 } else if (ev->type == LLC_CONN_EV_TYPE_ACK_TMR) 108 } else if (ev->type == LLC_CONN_EV_TYPE_ACK_TMR)
122 reason = LLC_DISC_REASON_ACK_TMR_EXP; 109 reason = LLC_DISC_REASON_ACK_TMR_EXP;
123 else { 110 else
124 reason = 0;
125 rc = -EINVAL; 111 rc = -EINVAL;
126 }
127 if (!rc) { 112 if (!rc) {
128 ev->reason = reason; 113 ev->reason = reason;
129 ev->ind_prim = LLC_DISC_PRIM; 114 ev->ind_prim = LLC_DISC_PRIM;
@@ -160,9 +145,6 @@ int llc_conn_ac_rst_ind(struct sock *sk, struct sk_buff *skb)
160 LLC_U_PDU_CMD(pdu) == LLC_2_PDU_CMD_SABME) { 145 LLC_U_PDU_CMD(pdu) == LLC_2_PDU_CMD_SABME) {
161 reason = LLC_RESET_REASON_REMOTE; 146 reason = LLC_RESET_REASON_REMOTE;
162 rc = 0; 147 rc = 0;
163 } else {
164 reason = 0;
165 rc = 1;
166 } 148 }
167 break; 149 break;
168 case LLC_CONN_EV_TYPE_ACK_TMR: 150 case LLC_CONN_EV_TYPE_ACK_TMR:
@@ -172,8 +154,7 @@ int llc_conn_ac_rst_ind(struct sock *sk, struct sk_buff *skb)
172 if (llc->retry_count > llc->n2) { 154 if (llc->retry_count > llc->n2) {
173 reason = LLC_RESET_REASON_LOCAL; 155 reason = LLC_RESET_REASON_LOCAL;
174 rc = 0; 156 rc = 0;
175 } else 157 }
176 rc = 1;
177 break; 158 break;
178 } 159 }
179 if (!rc) { 160 if (!rc) {
@@ -217,18 +198,17 @@ int llc_conn_ac_stop_rej_tmr_if_data_flag_eq_2(struct sock *sk,
217int llc_conn_ac_send_disc_cmd_p_set_x(struct sock *sk, struct sk_buff *skb) 198int llc_conn_ac_send_disc_cmd_p_set_x(struct sock *sk, struct sk_buff *skb)
218{ 199{
219 int rc = -ENOBUFS; 200 int rc = -ENOBUFS;
220 struct sk_buff *nskb = llc_alloc_frame(); 201 struct llc_sock *llc = llc_sk(sk);
202 struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev);
221 203
222 if (nskb) { 204 if (nskb) {
223 struct llc_sock *llc = llc_sk(sk);
224 struct llc_sap *sap = llc->sap; 205 struct llc_sap *sap = llc->sap;
225 206
226 nskb->dev = llc->dev;
227 llc_pdu_header_init(nskb, LLC_PDU_TYPE_U, sap->laddr.lsap, 207 llc_pdu_header_init(nskb, LLC_PDU_TYPE_U, sap->laddr.lsap,
228 llc->daddr.lsap, LLC_PDU_CMD); 208 llc->daddr.lsap, LLC_PDU_CMD);
229 llc_pdu_init_as_disc_cmd(nskb, 1); 209 llc_pdu_init_as_disc_cmd(nskb, 1);
230 rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac); 210 rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac);
231 if (rc) 211 if (unlikely(rc))
232 goto free; 212 goto free;
233 llc_conn_send_pdu(sk, nskb); 213 llc_conn_send_pdu(sk, nskb);
234 llc_conn_ac_set_p_flag_1(sk, skb); 214 llc_conn_ac_set_p_flag_1(sk, skb);
@@ -243,20 +223,19 @@ free:
243int llc_conn_ac_send_dm_rsp_f_set_p(struct sock *sk, struct sk_buff *skb) 223int llc_conn_ac_send_dm_rsp_f_set_p(struct sock *sk, struct sk_buff *skb)
244{ 224{
245 int rc = -ENOBUFS; 225 int rc = -ENOBUFS;
246 struct sk_buff *nskb = llc_alloc_frame(); 226 struct llc_sock *llc = llc_sk(sk);
227 struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev);
247 228
248 if (nskb) { 229 if (nskb) {
249 struct llc_sock *llc = llc_sk(sk);
250 struct llc_sap *sap = llc->sap; 230 struct llc_sap *sap = llc->sap;
251 u8 f_bit; 231 u8 f_bit;
252 232
253 nskb->dev = llc->dev;
254 llc_pdu_decode_pf_bit(skb, &f_bit); 233 llc_pdu_decode_pf_bit(skb, &f_bit);
255 llc_pdu_header_init(nskb, LLC_PDU_TYPE_U, sap->laddr.lsap, 234 llc_pdu_header_init(nskb, LLC_PDU_TYPE_U, sap->laddr.lsap,
256 llc->daddr.lsap, LLC_PDU_RSP); 235 llc->daddr.lsap, LLC_PDU_RSP);
257 llc_pdu_init_as_dm_rsp(nskb, f_bit); 236 llc_pdu_init_as_dm_rsp(nskb, f_bit);
258 rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac); 237 rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac);
259 if (rc) 238 if (unlikely(rc))
260 goto free; 239 goto free;
261 llc_conn_send_pdu(sk, nskb); 240 llc_conn_send_pdu(sk, nskb);
262 } 241 }
@@ -270,19 +249,17 @@ free:
270int llc_conn_ac_send_dm_rsp_f_set_1(struct sock *sk, struct sk_buff *skb) 249int llc_conn_ac_send_dm_rsp_f_set_1(struct sock *sk, struct sk_buff *skb)
271{ 250{
272 int rc = -ENOBUFS; 251 int rc = -ENOBUFS;
273 struct sk_buff *nskb = llc_alloc_frame(); 252 struct llc_sock *llc = llc_sk(sk);
253 struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev);
274 254
275 if (nskb) { 255 if (nskb) {
276 struct llc_sock *llc = llc_sk(sk);
277 struct llc_sap *sap = llc->sap; 256 struct llc_sap *sap = llc->sap;
278 u8 f_bit = 1;
279 257
280 nskb->dev = llc->dev;
281 llc_pdu_header_init(nskb, LLC_PDU_TYPE_U, sap->laddr.lsap, 258 llc_pdu_header_init(nskb, LLC_PDU_TYPE_U, sap->laddr.lsap,
282 llc->daddr.lsap, LLC_PDU_RSP); 259 llc->daddr.lsap, LLC_PDU_RSP);
283 llc_pdu_init_as_dm_rsp(nskb, f_bit); 260 llc_pdu_init_as_dm_rsp(nskb, 1);
284 rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac); 261 rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac);
285 if (rc) 262 if (unlikely(rc))
286 goto free; 263 goto free;
287 llc_conn_send_pdu(sk, nskb); 264 llc_conn_send_pdu(sk, nskb);
288 } 265 }
@@ -306,17 +283,16 @@ int llc_conn_ac_send_frmr_rsp_f_set_x(struct sock *sk, struct sk_buff *skb)
306 llc_pdu_decode_pf_bit(skb, &f_bit); 283 llc_pdu_decode_pf_bit(skb, &f_bit);
307 else 284 else
308 f_bit = 0; 285 f_bit = 0;
309 nskb = llc_alloc_frame(); 286 nskb = llc_alloc_frame(sk, llc->dev);
310 if (nskb) { 287 if (nskb) {
311 struct llc_sap *sap = llc->sap; 288 struct llc_sap *sap = llc->sap;
312 289
313 nskb->dev = llc->dev;
314 llc_pdu_header_init(nskb, LLC_PDU_TYPE_U, sap->laddr.lsap, 290 llc_pdu_header_init(nskb, LLC_PDU_TYPE_U, sap->laddr.lsap,
315 llc->daddr.lsap, LLC_PDU_RSP); 291 llc->daddr.lsap, LLC_PDU_RSP);
316 llc_pdu_init_as_frmr_rsp(nskb, pdu, f_bit, llc->vS, 292 llc_pdu_init_as_frmr_rsp(nskb, pdu, f_bit, llc->vS,
317 llc->vR, INCORRECT); 293 llc->vR, INCORRECT);
318 rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac); 294 rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac);
319 if (rc) 295 if (unlikely(rc))
320 goto free; 296 goto free;
321 llc_conn_send_pdu(sk, nskb); 297 llc_conn_send_pdu(sk, nskb);
322 } 298 }
@@ -330,21 +306,19 @@ free:
330int llc_conn_ac_resend_frmr_rsp_f_set_0(struct sock *sk, struct sk_buff *skb) 306int llc_conn_ac_resend_frmr_rsp_f_set_0(struct sock *sk, struct sk_buff *skb)
331{ 307{
332 int rc = -ENOBUFS; 308 int rc = -ENOBUFS;
333 struct sk_buff *nskb = llc_alloc_frame(); 309 struct llc_sock *llc = llc_sk(sk);
310 struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev);
334 311
335 if (nskb) { 312 if (nskb) {
336 u8 f_bit = 0;
337 struct llc_sock *llc = llc_sk(sk);
338 struct llc_sap *sap = llc->sap; 313 struct llc_sap *sap = llc->sap;
339 struct llc_pdu_sn *pdu = (struct llc_pdu_sn *)&llc->rx_pdu_hdr; 314 struct llc_pdu_sn *pdu = (struct llc_pdu_sn *)&llc->rx_pdu_hdr;
340 315
341 nskb->dev = llc->dev;
342 llc_pdu_header_init(nskb, LLC_PDU_TYPE_U, sap->laddr.lsap, 316 llc_pdu_header_init(nskb, LLC_PDU_TYPE_U, sap->laddr.lsap,
343 llc->daddr.lsap, LLC_PDU_RSP); 317 llc->daddr.lsap, LLC_PDU_RSP);
344 llc_pdu_init_as_frmr_rsp(nskb, pdu, f_bit, llc->vS, 318 llc_pdu_init_as_frmr_rsp(nskb, pdu, 0, llc->vS,
345 llc->vR, INCORRECT); 319 llc->vR, INCORRECT);
346 rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac); 320 rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac);
347 if (rc) 321 if (unlikely(rc))
348 goto free; 322 goto free;
349 llc_conn_send_pdu(sk, nskb); 323 llc_conn_send_pdu(sk, nskb);
350 } 324 }
@@ -360,21 +334,20 @@ int llc_conn_ac_resend_frmr_rsp_f_set_p(struct sock *sk, struct sk_buff *skb)
360 u8 f_bit; 334 u8 f_bit;
361 int rc = -ENOBUFS; 335 int rc = -ENOBUFS;
362 struct sk_buff *nskb; 336 struct sk_buff *nskb;
337 struct llc_sock *llc = llc_sk(sk);
363 338
364 llc_pdu_decode_pf_bit(skb, &f_bit); 339 llc_pdu_decode_pf_bit(skb, &f_bit);
365 nskb = llc_alloc_frame(); 340 nskb = llc_alloc_frame(sk, llc->dev);
366 if (nskb) { 341 if (nskb) {
367 struct llc_sock *llc = llc_sk(sk);
368 struct llc_sap *sap = llc->sap; 342 struct llc_sap *sap = llc->sap;
369 struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); 343 struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
370 344
371 nskb->dev = llc->dev;
372 llc_pdu_header_init(nskb, LLC_PDU_TYPE_U, sap->laddr.lsap, 345 llc_pdu_header_init(nskb, LLC_PDU_TYPE_U, sap->laddr.lsap,
373 llc->daddr.lsap, LLC_PDU_RSP); 346 llc->daddr.lsap, LLC_PDU_RSP);
374 llc_pdu_init_as_frmr_rsp(nskb, pdu, f_bit, llc->vS, 347 llc_pdu_init_as_frmr_rsp(nskb, pdu, f_bit, llc->vS,
375 llc->vR, INCORRECT); 348 llc->vR, INCORRECT);
376 rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac); 349 rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac);
377 if (rc) 350 if (unlikely(rc))
378 goto free; 351 goto free;
379 llc_conn_send_pdu(sk, nskb); 352 llc_conn_send_pdu(sk, nskb);
380 } 353 }
@@ -395,7 +368,7 @@ int llc_conn_ac_send_i_cmd_p_set_1(struct sock *sk, struct sk_buff *skb)
395 llc->daddr.lsap, LLC_PDU_CMD); 368 llc->daddr.lsap, LLC_PDU_CMD);
396 llc_pdu_init_as_i_cmd(skb, 1, llc->vS, llc->vR); 369 llc_pdu_init_as_i_cmd(skb, 1, llc->vS, llc->vR);
397 rc = llc_mac_hdr_init(skb, llc->dev->dev_addr, llc->daddr.mac); 370 rc = llc_mac_hdr_init(skb, llc->dev->dev_addr, llc->daddr.mac);
398 if (!rc) { 371 if (likely(!rc)) {
399 llc_conn_send_pdu(sk, skb); 372 llc_conn_send_pdu(sk, skb);
400 llc_conn_ac_inc_vs_by_1(sk, skb); 373 llc_conn_ac_inc_vs_by_1(sk, skb);
401 } 374 }
@@ -412,7 +385,7 @@ static int llc_conn_ac_send_i_cmd_p_set_0(struct sock *sk, struct sk_buff *skb)
412 llc->daddr.lsap, LLC_PDU_CMD); 385 llc->daddr.lsap, LLC_PDU_CMD);
413 llc_pdu_init_as_i_cmd(skb, 0, llc->vS, llc->vR); 386 llc_pdu_init_as_i_cmd(skb, 0, llc->vS, llc->vR);
414 rc = llc_mac_hdr_init(skb, llc->dev->dev_addr, llc->daddr.mac); 387 rc = llc_mac_hdr_init(skb, llc->dev->dev_addr, llc->daddr.mac);
415 if (!rc) { 388 if (likely(!rc)) {
416 llc_conn_send_pdu(sk, skb); 389 llc_conn_send_pdu(sk, skb);
417 llc_conn_ac_inc_vs_by_1(sk, skb); 390 llc_conn_ac_inc_vs_by_1(sk, skb);
418 } 391 }
@@ -429,7 +402,7 @@ int llc_conn_ac_send_i_xxx_x_set_0(struct sock *sk, struct sk_buff *skb)
429 llc->daddr.lsap, LLC_PDU_CMD); 402 llc->daddr.lsap, LLC_PDU_CMD);
430 llc_pdu_init_as_i_cmd(skb, 0, llc->vS, llc->vR); 403 llc_pdu_init_as_i_cmd(skb, 0, llc->vS, llc->vR);
431 rc = llc_mac_hdr_init(skb, llc->dev->dev_addr, llc->daddr.mac); 404 rc = llc_mac_hdr_init(skb, llc->dev->dev_addr, llc->daddr.mac);
432 if (!rc) { 405 if (likely(!rc)) {
433 llc_conn_send_pdu(sk, skb); 406 llc_conn_send_pdu(sk, skb);
434 llc_conn_ac_inc_vs_by_1(sk, skb); 407 llc_conn_ac_inc_vs_by_1(sk, skb);
435 } 408 }
@@ -451,18 +424,17 @@ int llc_conn_ac_resend_i_xxx_x_set_0_or_send_rr(struct sock *sk,
451 u8 nr; 424 u8 nr;
452 struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); 425 struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
453 int rc = -ENOBUFS; 426 int rc = -ENOBUFS;
454 struct sk_buff *nskb = llc_alloc_frame(); 427 struct llc_sock *llc = llc_sk(sk);
428 struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev);
455 429
456 if (nskb) { 430 if (nskb) {
457 struct llc_sock *llc = llc_sk(sk);
458 struct llc_sap *sap = llc->sap; 431 struct llc_sap *sap = llc->sap;
459 432
460 nskb->dev = llc->dev;
461 llc_pdu_header_init(nskb, LLC_PDU_TYPE_U, sap->laddr.lsap, 433 llc_pdu_header_init(nskb, LLC_PDU_TYPE_U, sap->laddr.lsap,
462 llc->daddr.lsap, LLC_PDU_RSP); 434 llc->daddr.lsap, LLC_PDU_RSP);
463 llc_pdu_init_as_rr_rsp(nskb, 0, llc->vR); 435 llc_pdu_init_as_rr_rsp(nskb, 0, llc->vR);
464 rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac); 436 rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac);
465 if (!rc) 437 if (likely(!rc))
466 llc_conn_send_pdu(sk, nskb); 438 llc_conn_send_pdu(sk, nskb);
467 else 439 else
468 kfree_skb(skb); 440 kfree_skb(skb);
@@ -487,18 +459,17 @@ int llc_conn_ac_resend_i_rsp_f_set_1(struct sock *sk, struct sk_buff *skb)
487int llc_conn_ac_send_rej_cmd_p_set_1(struct sock *sk, struct sk_buff *skb) 459int llc_conn_ac_send_rej_cmd_p_set_1(struct sock *sk, struct sk_buff *skb)
488{ 460{
489 int rc = -ENOBUFS; 461 int rc = -ENOBUFS;
490 struct sk_buff *nskb = llc_alloc_frame(); 462 struct llc_sock *llc = llc_sk(sk);
463 struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev);
491 464
492 if (nskb) { 465 if (nskb) {
493 struct llc_sock *llc = llc_sk(sk);
494 struct llc_sap *sap = llc->sap; 466 struct llc_sap *sap = llc->sap;
495 467
496 nskb->dev = llc->dev;
497 llc_pdu_header_init(nskb, LLC_PDU_TYPE_S, sap->laddr.lsap, 468 llc_pdu_header_init(nskb, LLC_PDU_TYPE_S, sap->laddr.lsap,
498 llc->daddr.lsap, LLC_PDU_CMD); 469 llc->daddr.lsap, LLC_PDU_CMD);
499 llc_pdu_init_as_rej_cmd(nskb, 1, llc->vR); 470 llc_pdu_init_as_rej_cmd(nskb, 1, llc->vR);
500 rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac); 471 rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac);
501 if (rc) 472 if (unlikely(rc))
502 goto free; 473 goto free;
503 llc_conn_send_pdu(sk, nskb); 474 llc_conn_send_pdu(sk, nskb);
504 } 475 }
@@ -512,19 +483,17 @@ free:
512int llc_conn_ac_send_rej_rsp_f_set_1(struct sock *sk, struct sk_buff *skb) 483int llc_conn_ac_send_rej_rsp_f_set_1(struct sock *sk, struct sk_buff *skb)
513{ 484{
514 int rc = -ENOBUFS; 485 int rc = -ENOBUFS;
515 struct sk_buff *nskb = llc_alloc_frame(); 486 struct llc_sock *llc = llc_sk(sk);
487 struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev);
516 488
517 if (nskb) { 489 if (nskb) {
518 u8 f_bit = 1;
519 struct llc_sock *llc = llc_sk(sk);
520 struct llc_sap *sap = llc->sap; 490 struct llc_sap *sap = llc->sap;
521 491
522 nskb->dev = llc->dev;
523 llc_pdu_header_init(nskb, LLC_PDU_TYPE_S, sap->laddr.lsap, 492 llc_pdu_header_init(nskb, LLC_PDU_TYPE_S, sap->laddr.lsap,
524 llc->daddr.lsap, LLC_PDU_RSP); 493 llc->daddr.lsap, LLC_PDU_RSP);
525 llc_pdu_init_as_rej_rsp(nskb, f_bit, llc->vR); 494 llc_pdu_init_as_rej_rsp(nskb, 1, llc->vR);
526 rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac); 495 rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac);
527 if (rc) 496 if (unlikely(rc))
528 goto free; 497 goto free;
529 llc_conn_send_pdu(sk, nskb); 498 llc_conn_send_pdu(sk, nskb);
530 } 499 }
@@ -538,19 +507,17 @@ free:
538int llc_conn_ac_send_rej_xxx_x_set_0(struct sock *sk, struct sk_buff *skb) 507int llc_conn_ac_send_rej_xxx_x_set_0(struct sock *sk, struct sk_buff *skb)
539{ 508{
540 int rc = -ENOBUFS; 509 int rc = -ENOBUFS;
541 struct sk_buff *nskb = llc_alloc_frame(); 510 struct llc_sock *llc = llc_sk(sk);
511 struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev);
542 512
543 if (nskb) { 513 if (nskb) {
544 struct llc_sock *llc = llc_sk(sk);
545 struct llc_sap *sap = llc->sap; 514 struct llc_sap *sap = llc->sap;
546 u8 f_bit = 0;
547 515
548 nskb->dev = llc->dev;
549 llc_pdu_header_init(nskb, LLC_PDU_TYPE_S, sap->laddr.lsap, 516 llc_pdu_header_init(nskb, LLC_PDU_TYPE_S, sap->laddr.lsap,
550 llc->daddr.lsap, LLC_PDU_RSP); 517 llc->daddr.lsap, LLC_PDU_RSP);
551 llc_pdu_init_as_rej_rsp(nskb, f_bit, llc->vR); 518 llc_pdu_init_as_rej_rsp(nskb, 0, llc->vR);
552 rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac); 519 rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac);
553 if (rc) 520 if (unlikely(rc))
554 goto free; 521 goto free;
555 llc_conn_send_pdu(sk, nskb); 522 llc_conn_send_pdu(sk, nskb);
556 } 523 }
@@ -564,18 +531,17 @@ free:
564int llc_conn_ac_send_rnr_cmd_p_set_1(struct sock *sk, struct sk_buff *skb) 531int llc_conn_ac_send_rnr_cmd_p_set_1(struct sock *sk, struct sk_buff *skb)
565{ 532{
566 int rc = -ENOBUFS; 533 int rc = -ENOBUFS;
567 struct sk_buff *nskb = llc_alloc_frame(); 534 struct llc_sock *llc = llc_sk(sk);
535 struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev);
568 536
569 if (nskb) { 537 if (nskb) {
570 struct llc_sock *llc = llc_sk(sk);
571 struct llc_sap *sap = llc->sap; 538 struct llc_sap *sap = llc->sap;
572 539
573 nskb->dev = llc->dev;
574 llc_pdu_header_init(nskb, LLC_PDU_TYPE_S, sap->laddr.lsap, 540 llc_pdu_header_init(nskb, LLC_PDU_TYPE_S, sap->laddr.lsap,
575 llc->daddr.lsap, LLC_PDU_CMD); 541 llc->daddr.lsap, LLC_PDU_CMD);
576 llc_pdu_init_as_rnr_cmd(nskb, 1, llc->vR); 542 llc_pdu_init_as_rnr_cmd(nskb, 1, llc->vR);
577 rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac); 543 rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac);
578 if (rc) 544 if (unlikely(rc))
579 goto free; 545 goto free;
580 llc_conn_send_pdu(sk, nskb); 546 llc_conn_send_pdu(sk, nskb);
581 } 547 }
@@ -589,19 +555,17 @@ free:
589int llc_conn_ac_send_rnr_rsp_f_set_1(struct sock *sk, struct sk_buff *skb) 555int llc_conn_ac_send_rnr_rsp_f_set_1(struct sock *sk, struct sk_buff *skb)
590{ 556{
591 int rc = -ENOBUFS; 557 int rc = -ENOBUFS;
592 struct sk_buff *nskb = llc_alloc_frame(); 558 struct llc_sock *llc = llc_sk(sk);
559 struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev);
593 560
594 if (nskb) { 561 if (nskb) {
595 struct llc_sock *llc = llc_sk(sk);
596 struct llc_sap *sap = llc->sap; 562 struct llc_sap *sap = llc->sap;
597 u8 f_bit = 1;
598 563
599 nskb->dev = llc->dev;
600 llc_pdu_header_init(nskb, LLC_PDU_TYPE_S, sap->laddr.lsap, 564 llc_pdu_header_init(nskb, LLC_PDU_TYPE_S, sap->laddr.lsap,
601 llc->daddr.lsap, LLC_PDU_RSP); 565 llc->daddr.lsap, LLC_PDU_RSP);
602 llc_pdu_init_as_rnr_rsp(nskb, f_bit, llc->vR); 566 llc_pdu_init_as_rnr_rsp(nskb, 1, llc->vR);
603 rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac); 567 rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac);
604 if (rc) 568 if (unlikely(rc))
605 goto free; 569 goto free;
606 llc_conn_send_pdu(sk, nskb); 570 llc_conn_send_pdu(sk, nskb);
607 } 571 }
@@ -615,19 +579,17 @@ free:
615int llc_conn_ac_send_rnr_xxx_x_set_0(struct sock *sk, struct sk_buff *skb) 579int llc_conn_ac_send_rnr_xxx_x_set_0(struct sock *sk, struct sk_buff *skb)
616{ 580{
617 int rc = -ENOBUFS; 581 int rc = -ENOBUFS;
618 struct sk_buff *nskb = llc_alloc_frame(); 582 struct llc_sock *llc = llc_sk(sk);
583 struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev);
619 584
620 if (nskb) { 585 if (nskb) {
621 u8 f_bit = 0;
622 struct llc_sock *llc = llc_sk(sk);
623 struct llc_sap *sap = llc->sap; 586 struct llc_sap *sap = llc->sap;
624 587
625 nskb->dev = llc->dev;
626 llc_pdu_header_init(nskb, LLC_PDU_TYPE_S, sap->laddr.lsap, 588 llc_pdu_header_init(nskb, LLC_PDU_TYPE_S, sap->laddr.lsap,
627 llc->daddr.lsap, LLC_PDU_RSP); 589 llc->daddr.lsap, LLC_PDU_RSP);
628 llc_pdu_init_as_rnr_rsp(nskb, f_bit, llc->vR); 590 llc_pdu_init_as_rnr_rsp(nskb, 0, llc->vR);
629 rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac); 591 rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac);
630 if (rc) 592 if (unlikely(rc))
631 goto free; 593 goto free;
632 llc_conn_send_pdu(sk, nskb); 594 llc_conn_send_pdu(sk, nskb);
633 } 595 }
@@ -645,7 +607,7 @@ int llc_conn_ac_set_remote_busy(struct sock *sk, struct sk_buff *skb)
645 if (!llc->remote_busy_flag) { 607 if (!llc->remote_busy_flag) {
646 llc->remote_busy_flag = 1; 608 llc->remote_busy_flag = 1;
647 mod_timer(&llc->busy_state_timer.timer, 609 mod_timer(&llc->busy_state_timer.timer,
648 jiffies + llc->busy_state_timer.expire * HZ); 610 jiffies + llc->busy_state_timer.expire);
649 } 611 }
650 return 0; 612 return 0;
651} 613}
@@ -653,18 +615,17 @@ int llc_conn_ac_set_remote_busy(struct sock *sk, struct sk_buff *skb)
653int llc_conn_ac_opt_send_rnr_xxx_x_set_0(struct sock *sk, struct sk_buff *skb) 615int llc_conn_ac_opt_send_rnr_xxx_x_set_0(struct sock *sk, struct sk_buff *skb)
654{ 616{
655 int rc = -ENOBUFS; 617 int rc = -ENOBUFS;
656 struct sk_buff *nskb = llc_alloc_frame(); 618 struct llc_sock *llc = llc_sk(sk);
619 struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev);
657 620
658 if (nskb) { 621 if (nskb) {
659 struct llc_sock *llc = llc_sk(sk);
660 struct llc_sap *sap = llc->sap; 622 struct llc_sap *sap = llc->sap;
661 623
662 nskb->dev = llc->dev;
663 llc_pdu_header_init(nskb, LLC_PDU_TYPE_S, sap->laddr.lsap, 624 llc_pdu_header_init(nskb, LLC_PDU_TYPE_S, sap->laddr.lsap,
664 llc->daddr.lsap, LLC_PDU_RSP); 625 llc->daddr.lsap, LLC_PDU_RSP);
665 llc_pdu_init_as_rnr_rsp(nskb, 0, llc->vR); 626 llc_pdu_init_as_rnr_rsp(nskb, 0, llc->vR);
666 rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac); 627 rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac);
667 if (rc) 628 if (unlikely(rc))
668 goto free; 629 goto free;
669 llc_conn_send_pdu(sk, nskb); 630 llc_conn_send_pdu(sk, nskb);
670 } 631 }
@@ -678,18 +639,17 @@ free:
678int llc_conn_ac_send_rr_cmd_p_set_1(struct sock *sk, struct sk_buff *skb) 639int llc_conn_ac_send_rr_cmd_p_set_1(struct sock *sk, struct sk_buff *skb)
679{ 640{
680 int rc = -ENOBUFS; 641 int rc = -ENOBUFS;
681 struct sk_buff *nskb = llc_alloc_frame(); 642 struct llc_sock *llc = llc_sk(sk);
643 struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev);
682 644
683 if (nskb) { 645 if (nskb) {
684 struct llc_sock *llc = llc_sk(sk);
685 struct llc_sap *sap = llc->sap; 646 struct llc_sap *sap = llc->sap;
686 647
687 nskb->dev = llc->dev;
688 llc_pdu_header_init(nskb, LLC_PDU_TYPE_S, sap->laddr.lsap, 648 llc_pdu_header_init(nskb, LLC_PDU_TYPE_S, sap->laddr.lsap,
689 llc->daddr.lsap, LLC_PDU_CMD); 649 llc->daddr.lsap, LLC_PDU_CMD);
690 llc_pdu_init_as_rr_cmd(nskb, 1, llc->vR); 650 llc_pdu_init_as_rr_cmd(nskb, 1, llc->vR);
691 rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac); 651 rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac);
692 if (rc) 652 if (unlikely(rc))
693 goto free; 653 goto free;
694 llc_conn_send_pdu(sk, nskb); 654 llc_conn_send_pdu(sk, nskb);
695 } 655 }
@@ -703,19 +663,18 @@ free:
703int llc_conn_ac_send_rr_rsp_f_set_1(struct sock *sk, struct sk_buff *skb) 663int llc_conn_ac_send_rr_rsp_f_set_1(struct sock *sk, struct sk_buff *skb)
704{ 664{
705 int rc = -ENOBUFS; 665 int rc = -ENOBUFS;
706 struct sk_buff *nskb = llc_alloc_frame(); 666 struct llc_sock *llc = llc_sk(sk);
667 struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev);
707 668
708 if (nskb) { 669 if (nskb) {
709 struct llc_sock *llc = llc_sk(sk);
710 struct llc_sap *sap = llc->sap; 670 struct llc_sap *sap = llc->sap;
711 u8 f_bit = 1; 671 u8 f_bit = 1;
712 672
713 nskb->dev = llc->dev;
714 llc_pdu_header_init(nskb, LLC_PDU_TYPE_S, sap->laddr.lsap, 673 llc_pdu_header_init(nskb, LLC_PDU_TYPE_S, sap->laddr.lsap,
715 llc->daddr.lsap, LLC_PDU_RSP); 674 llc->daddr.lsap, LLC_PDU_RSP);
716 llc_pdu_init_as_rr_rsp(nskb, f_bit, llc->vR); 675 llc_pdu_init_as_rr_rsp(nskb, f_bit, llc->vR);
717 rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac); 676 rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac);
718 if (rc) 677 if (unlikely(rc))
719 goto free; 678 goto free;
720 llc_conn_send_pdu(sk, nskb); 679 llc_conn_send_pdu(sk, nskb);
721 } 680 }
@@ -729,19 +688,17 @@ free:
729int llc_conn_ac_send_ack_rsp_f_set_1(struct sock *sk, struct sk_buff *skb) 688int llc_conn_ac_send_ack_rsp_f_set_1(struct sock *sk, struct sk_buff *skb)
730{ 689{
731 int rc = -ENOBUFS; 690 int rc = -ENOBUFS;
732 struct sk_buff *nskb = llc_alloc_frame(); 691 struct llc_sock *llc = llc_sk(sk);
692 struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev);
733 693
734 if (nskb) { 694 if (nskb) {
735 struct llc_sock *llc = llc_sk(sk);
736 struct llc_sap *sap = llc->sap; 695 struct llc_sap *sap = llc->sap;
737 u8 f_bit = 1;
738 696
739 nskb->dev = llc->dev;
740 llc_pdu_header_init(nskb, LLC_PDU_TYPE_S, sap->laddr.lsap, 697 llc_pdu_header_init(nskb, LLC_PDU_TYPE_S, sap->laddr.lsap,
741 llc->daddr.lsap, LLC_PDU_RSP); 698 llc->daddr.lsap, LLC_PDU_RSP);
742 llc_pdu_init_as_rr_rsp(nskb, f_bit, llc->vR); 699 llc_pdu_init_as_rr_rsp(nskb, 1, llc->vR);
743 rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac); 700 rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac);
744 if (rc) 701 if (unlikely(rc))
745 goto free; 702 goto free;
746 llc_conn_send_pdu(sk, nskb); 703 llc_conn_send_pdu(sk, nskb);
747 } 704 }
@@ -755,18 +712,17 @@ free:
755int llc_conn_ac_send_rr_xxx_x_set_0(struct sock *sk, struct sk_buff *skb) 712int llc_conn_ac_send_rr_xxx_x_set_0(struct sock *sk, struct sk_buff *skb)
756{ 713{
757 int rc = -ENOBUFS; 714 int rc = -ENOBUFS;
758 struct sk_buff *nskb = llc_alloc_frame(); 715 struct llc_sock *llc = llc_sk(sk);
716 struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev);
759 717
760 if (nskb) { 718 if (nskb) {
761 struct llc_sock *llc = llc_sk(sk);
762 struct llc_sap *sap = llc->sap; 719 struct llc_sap *sap = llc->sap;
763 720
764 nskb->dev = llc->dev;
765 llc_pdu_header_init(nskb, LLC_PDU_TYPE_S, sap->laddr.lsap, 721 llc_pdu_header_init(nskb, LLC_PDU_TYPE_S, sap->laddr.lsap,
766 llc->daddr.lsap, LLC_PDU_RSP); 722 llc->daddr.lsap, LLC_PDU_RSP);
767 llc_pdu_init_as_rr_rsp(nskb, 0, llc->vR); 723 llc_pdu_init_as_rr_rsp(nskb, 0, llc->vR);
768 rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac); 724 rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac);
769 if (rc) 725 if (unlikely(rc))
770 goto free; 726 goto free;
771 llc_conn_send_pdu(sk, nskb); 727 llc_conn_send_pdu(sk, nskb);
772 } 728 }
@@ -780,18 +736,17 @@ free:
780int llc_conn_ac_send_ack_xxx_x_set_0(struct sock *sk, struct sk_buff *skb) 736int llc_conn_ac_send_ack_xxx_x_set_0(struct sock *sk, struct sk_buff *skb)
781{ 737{
782 int rc = -ENOBUFS; 738 int rc = -ENOBUFS;
783 struct sk_buff *nskb = llc_alloc_frame(); 739 struct llc_sock *llc = llc_sk(sk);
740 struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev);
784 741
785 if (nskb) { 742 if (nskb) {
786 struct llc_sock *llc = llc_sk(sk);
787 struct llc_sap *sap = llc->sap; 743 struct llc_sap *sap = llc->sap;
788 744
789 nskb->dev = llc->dev;
790 llc_pdu_header_init(nskb, LLC_PDU_TYPE_S, sap->laddr.lsap, 745 llc_pdu_header_init(nskb, LLC_PDU_TYPE_S, sap->laddr.lsap,
791 llc->daddr.lsap, LLC_PDU_RSP); 746 llc->daddr.lsap, LLC_PDU_RSP);
792 llc_pdu_init_as_rr_rsp(nskb, 0, llc->vR); 747 llc_pdu_init_as_rr_rsp(nskb, 0, llc->vR);
793 rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac); 748 rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac);
794 if (rc) 749 if (unlikely(rc))
795 goto free; 750 goto free;
796 llc_conn_send_pdu(sk, nskb); 751 llc_conn_send_pdu(sk, nskb);
797 } 752 }
@@ -815,8 +770,8 @@ void llc_conn_set_p_flag(struct sock *sk, u8 value)
815int llc_conn_ac_send_sabme_cmd_p_set_x(struct sock *sk, struct sk_buff *skb) 770int llc_conn_ac_send_sabme_cmd_p_set_x(struct sock *sk, struct sk_buff *skb)
816{ 771{
817 int rc = -ENOBUFS; 772 int rc = -ENOBUFS;
818 struct sk_buff *nskb = llc_alloc_frame();
819 struct llc_sock *llc = llc_sk(sk); 773 struct llc_sock *llc = llc_sk(sk);
774 struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev);
820 775
821 if (nskb) { 776 if (nskb) {
822 struct llc_sap *sap = llc->sap; 777 struct llc_sap *sap = llc->sap;
@@ -824,12 +779,11 @@ int llc_conn_ac_send_sabme_cmd_p_set_x(struct sock *sk, struct sk_buff *skb)
824 779
825 if (llc->dev->flags & IFF_LOOPBACK) 780 if (llc->dev->flags & IFF_LOOPBACK)
826 dmac = llc->dev->dev_addr; 781 dmac = llc->dev->dev_addr;
827 nskb->dev = llc->dev;
828 llc_pdu_header_init(nskb, LLC_PDU_TYPE_U, sap->laddr.lsap, 782 llc_pdu_header_init(nskb, LLC_PDU_TYPE_U, sap->laddr.lsap,
829 llc->daddr.lsap, LLC_PDU_CMD); 783 llc->daddr.lsap, LLC_PDU_CMD);
830 llc_pdu_init_as_sabme_cmd(nskb, 1); 784 llc_pdu_init_as_sabme_cmd(nskb, 1);
831 rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, dmac); 785 rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, dmac);
832 if (rc) 786 if (unlikely(rc))
833 goto free; 787 goto free;
834 llc_conn_send_pdu(sk, nskb); 788 llc_conn_send_pdu(sk, nskb);
835 llc_conn_set_p_flag(sk, 1); 789 llc_conn_set_p_flag(sk, 1);
@@ -845,11 +799,11 @@ int llc_conn_ac_send_ua_rsp_f_set_p(struct sock *sk, struct sk_buff *skb)
845{ 799{
846 u8 f_bit; 800 u8 f_bit;
847 int rc = -ENOBUFS; 801 int rc = -ENOBUFS;
848 struct sk_buff *nskb = llc_alloc_frame(); 802 struct llc_sock *llc = llc_sk(sk);
803 struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev);
849 804
850 llc_pdu_decode_pf_bit(skb, &f_bit); 805 llc_pdu_decode_pf_bit(skb, &f_bit);
851 if (nskb) { 806 if (nskb) {
852 struct llc_sock *llc = llc_sk(sk);
853 struct llc_sap *sap = llc->sap; 807 struct llc_sap *sap = llc->sap;
854 808
855 nskb->dev = llc->dev; 809 nskb->dev = llc->dev;
@@ -857,7 +811,7 @@ int llc_conn_ac_send_ua_rsp_f_set_p(struct sock *sk, struct sk_buff *skb)
857 llc->daddr.lsap, LLC_PDU_RSP); 811 llc->daddr.lsap, LLC_PDU_RSP);
858 llc_pdu_init_as_ua_rsp(nskb, f_bit); 812 llc_pdu_init_as_ua_rsp(nskb, f_bit);
859 rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac); 813 rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac);
860 if (rc) 814 if (unlikely(rc))
861 goto free; 815 goto free;
862 llc_conn_send_pdu(sk, nskb); 816 llc_conn_send_pdu(sk, nskb);
863 } 817 }
@@ -886,7 +840,7 @@ int llc_conn_ac_start_p_timer(struct sock *sk, struct sk_buff *skb)
886 840
887 llc_conn_set_p_flag(sk, 1); 841 llc_conn_set_p_flag(sk, 1);
888 mod_timer(&llc->pf_cycle_timer.timer, 842 mod_timer(&llc->pf_cycle_timer.timer,
889 jiffies + llc->pf_cycle_timer.expire * HZ); 843 jiffies + llc->pf_cycle_timer.expire);
890 return 0; 844 return 0;
891} 845}
892 846
@@ -957,7 +911,7 @@ static int llc_conn_ac_send_i_rsp_f_set_ackpf(struct sock *sk,
957 llc->daddr.lsap, LLC_PDU_RSP); 911 llc->daddr.lsap, LLC_PDU_RSP);
958 llc_pdu_init_as_i_cmd(skb, llc->ack_pf, llc->vS, llc->vR); 912 llc_pdu_init_as_i_cmd(skb, llc->ack_pf, llc->vS, llc->vR);
959 rc = llc_mac_hdr_init(skb, llc->dev->dev_addr, llc->daddr.mac); 913 rc = llc_mac_hdr_init(skb, llc->dev->dev_addr, llc->daddr.mac);
960 if (!rc) { 914 if (likely(!rc)) {
961 llc_conn_send_pdu(sk, skb); 915 llc_conn_send_pdu(sk, skb);
962 llc_conn_ac_inc_vs_by_1(sk, skb); 916 llc_conn_ac_inc_vs_by_1(sk, skb);
963 } 917 }
@@ -1001,18 +955,17 @@ static int llc_conn_ac_send_rr_rsp_f_set_ackpf(struct sock *sk,
1001 struct sk_buff *skb) 955 struct sk_buff *skb)
1002{ 956{
1003 int rc = -ENOBUFS; 957 int rc = -ENOBUFS;
1004 struct sk_buff *nskb = llc_alloc_frame(); 958 struct llc_sock *llc = llc_sk(sk);
959 struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev);
1005 960
1006 if (nskb) { 961 if (nskb) {
1007 struct llc_sock *llc = llc_sk(sk);
1008 struct llc_sap *sap = llc->sap; 962 struct llc_sap *sap = llc->sap;
1009 963
1010 nskb->dev = llc->dev;
1011 llc_pdu_header_init(nskb, LLC_PDU_TYPE_S, sap->laddr.lsap, 964 llc_pdu_header_init(nskb, LLC_PDU_TYPE_S, sap->laddr.lsap,
1012 llc->daddr.lsap, LLC_PDU_RSP); 965 llc->daddr.lsap, LLC_PDU_RSP);
1013 llc_pdu_init_as_rr_rsp(nskb, llc->ack_pf, llc->vR); 966 llc_pdu_init_as_rr_rsp(nskb, llc->ack_pf, llc->vR);
1014 rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac); 967 rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac);
1015 if (rc) 968 if (unlikely(rc))
1016 goto free; 969 goto free;
1017 llc_conn_send_pdu(sk, nskb); 970 llc_conn_send_pdu(sk, nskb);
1018 } 971 }
@@ -1165,7 +1118,7 @@ int llc_conn_ac_start_ack_timer(struct sock *sk, struct sk_buff *skb)
1165{ 1118{
1166 struct llc_sock *llc = llc_sk(sk); 1119 struct llc_sock *llc = llc_sk(sk);
1167 1120
1168 mod_timer(&llc->ack_timer.timer, jiffies + llc->ack_timer.expire * HZ); 1121 mod_timer(&llc->ack_timer.timer, jiffies + llc->ack_timer.expire);
1169 return 0; 1122 return 0;
1170} 1123}
1171 1124
@@ -1174,7 +1127,7 @@ int llc_conn_ac_start_rej_timer(struct sock *sk, struct sk_buff *skb)
1174 struct llc_sock *llc = llc_sk(sk); 1127 struct llc_sock *llc = llc_sk(sk);
1175 1128
1176 mod_timer(&llc->rej_sent_timer.timer, 1129 mod_timer(&llc->rej_sent_timer.timer,
1177 jiffies + llc->rej_sent_timer.expire * HZ); 1130 jiffies + llc->rej_sent_timer.expire);
1178 return 0; 1131 return 0;
1179} 1132}
1180 1133
@@ -1185,7 +1138,7 @@ int llc_conn_ac_start_ack_tmr_if_not_running(struct sock *sk,
1185 1138
1186 if (!timer_pending(&llc->ack_timer.timer)) 1139 if (!timer_pending(&llc->ack_timer.timer))
1187 mod_timer(&llc->ack_timer.timer, 1140 mod_timer(&llc->ack_timer.timer,
1188 jiffies + llc->ack_timer.expire * HZ); 1141 jiffies + llc->ack_timer.expire);
1189 return 0; 1142 return 0;
1190} 1143}
1191 1144
@@ -1233,7 +1186,7 @@ int llc_conn_ac_upd_nr_received(struct sock *sk, struct sk_buff *skb)
1233 } 1186 }
1234 if (unacked) 1187 if (unacked)
1235 mod_timer(&llc->ack_timer.timer, 1188 mod_timer(&llc->ack_timer.timer,
1236 jiffies + llc->ack_timer.expire * HZ); 1189 jiffies + llc->ack_timer.expire);
1237 } else if (llc->failed_data_req) { 1190 } else if (llc->failed_data_req) {
1238 u8 f_bit; 1191 u8 f_bit;
1239 1192
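
Editor's note: the hunks above drop the "* HZ" scaling when the p/f-cycle, ack, rej-sent and busy timers are armed, because the expire fields now already hold jiffies (they are seeded from the new sysctl_llc2_*_timeout values later in this patch). The following is a minimal user-space sketch of that unit change only; HZ, the struct and the function names are illustrative stand-ins, not the kernel API.

#include <stdio.h>

#define HZ 1000                            /* illustrative tick rate */

struct llc_timer_model {
        unsigned long expire;              /* now kept in jiffies, not seconds */
};

/* Old style: expire held seconds, every arming site multiplied by HZ. */
static unsigned long arm_old(unsigned long now, unsigned long expire_seconds)
{
        return now + expire_seconds * HZ;
}

/* New style: expire is already jiffies, arming sites just add it. */
static unsigned long arm_new(unsigned long now, const struct llc_timer_model *t)
{
        return now + t->expire;
}

int main(void)
{
        struct llc_timer_model ack = { .expire = 3 * HZ };

        printf("old=%lu new=%lu\n", arm_old(5000, 3), arm_new(5000, &ack));
        return 0;
}
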
@@ -1354,13 +1307,13 @@ int llc_conn_ac_set_vs_nr(struct sock *sk, struct sk_buff *skb)
1354 return 0; 1307 return 0;
1355} 1308}
1356 1309
1357int llc_conn_ac_inc_vs_by_1(struct sock *sk, struct sk_buff *skb) 1310static int llc_conn_ac_inc_vs_by_1(struct sock *sk, struct sk_buff *skb)
1358{ 1311{
1359 llc_sk(sk)->vS = (llc_sk(sk)->vS + 1) % 128; 1312 llc_sk(sk)->vS = (llc_sk(sk)->vS + 1) % 128;
1360 return 0; 1313 return 0;
1361} 1314}
1362 1315
1363void llc_conn_pf_cycle_tmr_cb(unsigned long timeout_data) 1316static void llc_conn_tmr_common_cb(unsigned long timeout_data, u8 type)
1364{ 1317{
1365 struct sock *sk = (struct sock *)timeout_data; 1318 struct sock *sk = (struct sock *)timeout_data;
1366 struct sk_buff *skb = alloc_skb(0, GFP_ATOMIC); 1319 struct sk_buff *skb = alloc_skb(0, GFP_ATOMIC);
@@ -1369,59 +1322,31 @@ void llc_conn_pf_cycle_tmr_cb(unsigned long timeout_data)
1369 if (skb) { 1322 if (skb) {
1370 struct llc_conn_state_ev *ev = llc_conn_ev(skb); 1323 struct llc_conn_state_ev *ev = llc_conn_ev(skb);
1371 1324
1372 skb->sk = sk; 1325 skb_set_owner_r(skb, sk);
1373 ev->type = LLC_CONN_EV_TYPE_P_TMR; 1326 ev->type = type;
1374 llc_process_tmr_ev(sk, skb); 1327 llc_process_tmr_ev(sk, skb);
1375 } 1328 }
1376 bh_unlock_sock(sk); 1329 bh_unlock_sock(sk);
1377} 1330}
1378 1331
1379void llc_conn_busy_tmr_cb(unsigned long timeout_data) 1332void llc_conn_pf_cycle_tmr_cb(unsigned long timeout_data)
1380{ 1333{
1381 struct sock *sk = (struct sock *)timeout_data; 1334 llc_conn_tmr_common_cb(timeout_data, LLC_CONN_EV_TYPE_P_TMR);
1382 struct sk_buff *skb = alloc_skb(0, GFP_ATOMIC); 1335}
1383
1384 bh_lock_sock(sk);
1385 if (skb) {
1386 struct llc_conn_state_ev *ev = llc_conn_ev(skb);
1387 1336
1388 skb->sk = sk; 1337void llc_conn_busy_tmr_cb(unsigned long timeout_data)
1389 ev->type = LLC_CONN_EV_TYPE_BUSY_TMR; 1338{
1390 llc_process_tmr_ev(sk, skb); 1339 llc_conn_tmr_common_cb(timeout_data, LLC_CONN_EV_TYPE_BUSY_TMR);
1391 }
1392 bh_unlock_sock(sk);
1393} 1340}
1394 1341
1395void llc_conn_ack_tmr_cb(unsigned long timeout_data) 1342void llc_conn_ack_tmr_cb(unsigned long timeout_data)
1396{ 1343{
1397 struct sock* sk = (struct sock *)timeout_data; 1344 llc_conn_tmr_common_cb(timeout_data, LLC_CONN_EV_TYPE_ACK_TMR);
1398 struct sk_buff *skb = alloc_skb(0, GFP_ATOMIC);
1399
1400 bh_lock_sock(sk);
1401 if (skb) {
1402 struct llc_conn_state_ev *ev = llc_conn_ev(skb);
1403
1404 skb->sk = sk;
1405 ev->type = LLC_CONN_EV_TYPE_ACK_TMR;
1406 llc_process_tmr_ev(sk, skb);
1407 }
1408 bh_unlock_sock(sk);
1409} 1345}
1410 1346
1411void llc_conn_rej_tmr_cb(unsigned long timeout_data) 1347void llc_conn_rej_tmr_cb(unsigned long timeout_data)
1412{ 1348{
1413 struct sock *sk = (struct sock *)timeout_data; 1349 llc_conn_tmr_common_cb(timeout_data, LLC_CONN_EV_TYPE_REJ_TMR);
1414 struct sk_buff *skb = alloc_skb(0, GFP_ATOMIC);
1415
1416 bh_lock_sock(sk);
1417 if (skb) {
1418 struct llc_conn_state_ev *ev = llc_conn_ev(skb);
1419
1420 skb->sk = sk;
1421 ev->type = LLC_CONN_EV_TYPE_REJ_TMR;
1422 llc_process_tmr_ev(sk, skb);
1423 }
1424 bh_unlock_sock(sk);
1425} 1350}
1426 1351
1427int llc_conn_ac_rst_vs(struct sock *sk, struct sk_buff *skb) 1352int llc_conn_ac_rst_vs(struct sock *sk, struct sk_buff *skb)
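
Editor's note: the last hunks of llc_c_ac.c fold four nearly identical timer callbacks into one llc_conn_tmr_common_cb() helper that takes the event type as a parameter. The sketch below models that consolidation in plain user-space C; the types and names are illustrative only, not the kernel's.

#include <stdio.h>

enum ev_type { EV_P_TMR, EV_ACK_TMR, EV_REJ_TMR, EV_BUSY_TMR };

struct conn_model { const char *name; };

/* One shared body replaces four copies that differed only in the event type. */
static void tmr_common_cb(struct conn_model *c, enum ev_type type)
{
        /* lock the sock, allocate the event, hand it to the state machine ... */
        printf("%s: timer event %d\n", c->name, (int)type);
}

static void pf_cycle_tmr_cb(struct conn_model *c) { tmr_common_cb(c, EV_P_TMR); }
static void busy_tmr_cb(struct conn_model *c)     { tmr_common_cb(c, EV_BUSY_TMR); }
static void ack_tmr_cb(struct conn_model *c)      { tmr_common_cb(c, EV_ACK_TMR); }
static void rej_tmr_cb(struct conn_model *c)      { tmr_common_cb(c, EV_REJ_TMR); }

int main(void)
{
        struct conn_model c = { "conn0" };

        ack_tmr_cb(&c);
        busy_tmr_cb(&c);
        pf_cycle_tmr_cb(&c);
        rej_tmr_cb(&c);
        return 0;
}
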
diff --git a/net/llc/llc_c_ev.c b/net/llc/llc_c_ev.c
index d5bdb53a348f..c5deda246614 100644
--- a/net/llc/llc_c_ev.c
+++ b/net/llc/llc_c_ev.c
@@ -37,6 +37,7 @@
37#include <net/llc_conn.h> 37#include <net/llc_conn.h>
38#include <net/llc_sap.h> 38#include <net/llc_sap.h>
39#include <net/sock.h> 39#include <net/sock.h>
40#include <net/llc_c_ac.h>
40#include <net/llc_c_ev.h> 41#include <net/llc_c_ev.h>
41#include <net/llc_pdu.h> 42#include <net/llc_pdu.h>
42 43
@@ -46,8 +47,6 @@
46#define dprintk(args...) 47#define dprintk(args...)
47#endif 48#endif
48 49
49extern u16 llc_circular_between(u8 a, u8 b, u8 c);
50
51/** 50/**
52 * llc_util_ns_inside_rx_window - check if sequence number is in rx window 51 * llc_util_ns_inside_rx_window - check if sequence number is in rx window
53 * @ns: sequence number of received pdu. 52 * @ns: sequence number of received pdu.
@@ -99,7 +98,7 @@ out:
99 98
100int llc_conn_ev_conn_req(struct sock *sk, struct sk_buff *skb) 99int llc_conn_ev_conn_req(struct sock *sk, struct sk_buff *skb)
101{ 100{
102 struct llc_conn_state_ev *ev = llc_conn_ev(skb); 101 const struct llc_conn_state_ev *ev = llc_conn_ev(skb);
103 102
104 return ev->prim == LLC_CONN_PRIM && 103 return ev->prim == LLC_CONN_PRIM &&
105 ev->prim_type == LLC_PRIM_TYPE_REQ ? 0 : 1; 104 ev->prim_type == LLC_PRIM_TYPE_REQ ? 0 : 1;
@@ -107,7 +106,7 @@ int llc_conn_ev_conn_req(struct sock *sk, struct sk_buff *skb)
107 106
108int llc_conn_ev_data_req(struct sock *sk, struct sk_buff *skb) 107int llc_conn_ev_data_req(struct sock *sk, struct sk_buff *skb)
109{ 108{
110 struct llc_conn_state_ev *ev = llc_conn_ev(skb); 109 const struct llc_conn_state_ev *ev = llc_conn_ev(skb);
111 110
112 return ev->prim == LLC_DATA_PRIM && 111 return ev->prim == LLC_DATA_PRIM &&
113 ev->prim_type == LLC_PRIM_TYPE_REQ ? 0 : 1; 112 ev->prim_type == LLC_PRIM_TYPE_REQ ? 0 : 1;
@@ -115,7 +114,7 @@ int llc_conn_ev_data_req(struct sock *sk, struct sk_buff *skb)
115 114
116int llc_conn_ev_disc_req(struct sock *sk, struct sk_buff *skb) 115int llc_conn_ev_disc_req(struct sock *sk, struct sk_buff *skb)
117{ 116{
118 struct llc_conn_state_ev *ev = llc_conn_ev(skb); 117 const struct llc_conn_state_ev *ev = llc_conn_ev(skb);
119 118
120 return ev->prim == LLC_DISC_PRIM && 119 return ev->prim == LLC_DISC_PRIM &&
121 ev->prim_type == LLC_PRIM_TYPE_REQ ? 0 : 1; 120 ev->prim_type == LLC_PRIM_TYPE_REQ ? 0 : 1;
@@ -123,7 +122,7 @@ int llc_conn_ev_disc_req(struct sock *sk, struct sk_buff *skb)
123 122
124int llc_conn_ev_rst_req(struct sock *sk, struct sk_buff *skb) 123int llc_conn_ev_rst_req(struct sock *sk, struct sk_buff *skb)
125{ 124{
126 struct llc_conn_state_ev *ev = llc_conn_ev(skb); 125 const struct llc_conn_state_ev *ev = llc_conn_ev(skb);
127 126
128 return ev->prim == LLC_RESET_PRIM && 127 return ev->prim == LLC_RESET_PRIM &&
129 ev->prim_type == LLC_PRIM_TYPE_REQ ? 0 : 1; 128 ev->prim_type == LLC_PRIM_TYPE_REQ ? 0 : 1;
@@ -131,7 +130,7 @@ int llc_conn_ev_rst_req(struct sock *sk, struct sk_buff *skb)
131 130
132int llc_conn_ev_local_busy_detected(struct sock *sk, struct sk_buff *skb) 131int llc_conn_ev_local_busy_detected(struct sock *sk, struct sk_buff *skb)
133{ 132{
134 struct llc_conn_state_ev *ev = llc_conn_ev(skb); 133 const struct llc_conn_state_ev *ev = llc_conn_ev(skb);
135 134
136 return ev->type == LLC_CONN_EV_TYPE_SIMPLE && 135 return ev->type == LLC_CONN_EV_TYPE_SIMPLE &&
137 ev->prim_type == LLC_CONN_EV_LOCAL_BUSY_DETECTED ? 0 : 1; 136 ev->prim_type == LLC_CONN_EV_LOCAL_BUSY_DETECTED ? 0 : 1;
@@ -139,7 +138,7 @@ int llc_conn_ev_local_busy_detected(struct sock *sk, struct sk_buff *skb)
139 138
140int llc_conn_ev_local_busy_cleared(struct sock *sk, struct sk_buff *skb) 139int llc_conn_ev_local_busy_cleared(struct sock *sk, struct sk_buff *skb)
141{ 140{
142 struct llc_conn_state_ev *ev = llc_conn_ev(skb); 141 const struct llc_conn_state_ev *ev = llc_conn_ev(skb);
143 142
144 return ev->type == LLC_CONN_EV_TYPE_SIMPLE && 143 return ev->type == LLC_CONN_EV_TYPE_SIMPLE &&
145 ev->prim_type == LLC_CONN_EV_LOCAL_BUSY_CLEARED ? 0 : 1; 144 ev->prim_type == LLC_CONN_EV_LOCAL_BUSY_CLEARED ? 0 : 1;
@@ -152,7 +151,7 @@ int llc_conn_ev_rx_bad_pdu(struct sock *sk, struct sk_buff *skb)
152 151
153int llc_conn_ev_rx_disc_cmd_pbit_set_x(struct sock *sk, struct sk_buff *skb) 152int llc_conn_ev_rx_disc_cmd_pbit_set_x(struct sock *sk, struct sk_buff *skb)
154{ 153{
155 struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb); 154 const struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb);
156 155
157 return LLC_PDU_IS_CMD(pdu) && LLC_PDU_TYPE_IS_U(pdu) && 156 return LLC_PDU_IS_CMD(pdu) && LLC_PDU_TYPE_IS_U(pdu) &&
158 LLC_U_PDU_CMD(pdu) == LLC_2_PDU_CMD_DISC ? 0 : 1; 157 LLC_U_PDU_CMD(pdu) == LLC_2_PDU_CMD_DISC ? 0 : 1;
@@ -160,7 +159,7 @@ int llc_conn_ev_rx_disc_cmd_pbit_set_x(struct sock *sk, struct sk_buff *skb)
160 159
161int llc_conn_ev_rx_dm_rsp_fbit_set_x(struct sock *sk, struct sk_buff *skb) 160int llc_conn_ev_rx_dm_rsp_fbit_set_x(struct sock *sk, struct sk_buff *skb)
162{ 161{
163 struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb); 162 const struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb);
164 163
165 return LLC_PDU_IS_RSP(pdu) && LLC_PDU_TYPE_IS_U(pdu) && 164 return LLC_PDU_IS_RSP(pdu) && LLC_PDU_TYPE_IS_U(pdu) &&
166 LLC_U_PDU_RSP(pdu) == LLC_2_PDU_RSP_DM ? 0 : 1; 165 LLC_U_PDU_RSP(pdu) == LLC_2_PDU_RSP_DM ? 0 : 1;
@@ -168,7 +167,7 @@ int llc_conn_ev_rx_dm_rsp_fbit_set_x(struct sock *sk, struct sk_buff *skb)
168 167
169int llc_conn_ev_rx_frmr_rsp_fbit_set_x(struct sock *sk, struct sk_buff *skb) 168int llc_conn_ev_rx_frmr_rsp_fbit_set_x(struct sock *sk, struct sk_buff *skb)
170{ 169{
171 struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb); 170 const struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb);
172 171
173 return LLC_PDU_IS_RSP(pdu) && LLC_PDU_TYPE_IS_U(pdu) && 172 return LLC_PDU_IS_RSP(pdu) && LLC_PDU_TYPE_IS_U(pdu) &&
174 LLC_U_PDU_RSP(pdu) == LLC_2_PDU_RSP_FRMR ? 0 : 1; 173 LLC_U_PDU_RSP(pdu) == LLC_2_PDU_RSP_FRMR ? 0 : 1;
@@ -176,7 +175,7 @@ int llc_conn_ev_rx_frmr_rsp_fbit_set_x(struct sock *sk, struct sk_buff *skb)
176 175
177int llc_conn_ev_rx_i_cmd_pbit_set_0(struct sock *sk, struct sk_buff *skb) 176int llc_conn_ev_rx_i_cmd_pbit_set_0(struct sock *sk, struct sk_buff *skb)
178{ 177{
179 struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); 178 const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
180 179
181 return llc_conn_space(sk, skb) && 180 return llc_conn_space(sk, skb) &&
182 LLC_PDU_IS_CMD(pdu) && LLC_PDU_TYPE_IS_I(pdu) && 181 LLC_PDU_IS_CMD(pdu) && LLC_PDU_TYPE_IS_I(pdu) &&
@@ -186,7 +185,7 @@ int llc_conn_ev_rx_i_cmd_pbit_set_0(struct sock *sk, struct sk_buff *skb)
186 185
187int llc_conn_ev_rx_i_cmd_pbit_set_1(struct sock *sk, struct sk_buff *skb) 186int llc_conn_ev_rx_i_cmd_pbit_set_1(struct sock *sk, struct sk_buff *skb)
188{ 187{
189 struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); 188 const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
190 189
191 return llc_conn_space(sk, skb) && 190 return llc_conn_space(sk, skb) &&
192 LLC_PDU_IS_CMD(pdu) && LLC_PDU_TYPE_IS_I(pdu) && 191 LLC_PDU_IS_CMD(pdu) && LLC_PDU_TYPE_IS_I(pdu) &&
@@ -197,9 +196,9 @@ int llc_conn_ev_rx_i_cmd_pbit_set_1(struct sock *sk, struct sk_buff *skb)
197int llc_conn_ev_rx_i_cmd_pbit_set_0_unexpd_ns(struct sock *sk, 196int llc_conn_ev_rx_i_cmd_pbit_set_0_unexpd_ns(struct sock *sk,
198 struct sk_buff *skb) 197 struct sk_buff *skb)
199{ 198{
200 struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); 199 const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
201 u8 vr = llc_sk(sk)->vR; 200 const u8 vr = llc_sk(sk)->vR;
202 u8 ns = LLC_I_GET_NS(pdu); 201 const u8 ns = LLC_I_GET_NS(pdu);
203 202
204 return LLC_PDU_IS_CMD(pdu) && LLC_PDU_TYPE_IS_I(pdu) && 203 return LLC_PDU_IS_CMD(pdu) && LLC_PDU_TYPE_IS_I(pdu) &&
205 LLC_I_PF_IS_0(pdu) && ns != vr && 204 LLC_I_PF_IS_0(pdu) && ns != vr &&
@@ -209,9 +208,9 @@ int llc_conn_ev_rx_i_cmd_pbit_set_0_unexpd_ns(struct sock *sk,
209int llc_conn_ev_rx_i_cmd_pbit_set_1_unexpd_ns(struct sock *sk, 208int llc_conn_ev_rx_i_cmd_pbit_set_1_unexpd_ns(struct sock *sk,
210 struct sk_buff *skb) 209 struct sk_buff *skb)
211{ 210{
212 struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); 211 const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
213 u8 vr = llc_sk(sk)->vR; 212 const u8 vr = llc_sk(sk)->vR;
214 u8 ns = LLC_I_GET_NS(pdu); 213 const u8 ns = LLC_I_GET_NS(pdu);
215 214
216 return LLC_PDU_IS_CMD(pdu) && LLC_PDU_TYPE_IS_I(pdu) && 215 return LLC_PDU_IS_CMD(pdu) && LLC_PDU_TYPE_IS_I(pdu) &&
217 LLC_I_PF_IS_1(pdu) && ns != vr && 216 LLC_I_PF_IS_1(pdu) && ns != vr &&
@@ -221,10 +220,11 @@ int llc_conn_ev_rx_i_cmd_pbit_set_1_unexpd_ns(struct sock *sk,
221int llc_conn_ev_rx_i_cmd_pbit_set_x_inval_ns(struct sock *sk, 220int llc_conn_ev_rx_i_cmd_pbit_set_x_inval_ns(struct sock *sk,
222 struct sk_buff *skb) 221 struct sk_buff *skb)
223{ 222{
224 struct llc_pdu_sn * pdu = llc_pdu_sn_hdr(skb); 223 const struct llc_pdu_sn * pdu = llc_pdu_sn_hdr(skb);
225 u8 vr = llc_sk(sk)->vR; 224 const u8 vr = llc_sk(sk)->vR;
226 u8 ns = LLC_I_GET_NS(pdu); 225 const u8 ns = LLC_I_GET_NS(pdu);
227 u16 rc = LLC_PDU_IS_CMD(pdu) && LLC_PDU_TYPE_IS_I(pdu) && ns != vr && 226 const u16 rc = LLC_PDU_IS_CMD(pdu) && LLC_PDU_TYPE_IS_I(pdu) &&
227 ns != vr &&
228 llc_util_ns_inside_rx_window(ns, vr, llc_sk(sk)->rw) ? 0 : 1; 228 llc_util_ns_inside_rx_window(ns, vr, llc_sk(sk)->rw) ? 0 : 1;
229 if (!rc) 229 if (!rc)
230 dprintk("%s: matched, state=%d, ns=%d, vr=%d\n", 230 dprintk("%s: matched, state=%d, ns=%d, vr=%d\n",
@@ -234,7 +234,7 @@ int llc_conn_ev_rx_i_cmd_pbit_set_x_inval_ns(struct sock *sk,
234 234
235int llc_conn_ev_rx_i_rsp_fbit_set_0(struct sock *sk, struct sk_buff *skb) 235int llc_conn_ev_rx_i_rsp_fbit_set_0(struct sock *sk, struct sk_buff *skb)
236{ 236{
237 struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); 237 const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
238 238
239 return llc_conn_space(sk, skb) && 239 return llc_conn_space(sk, skb) &&
240 LLC_PDU_IS_RSP(pdu) && LLC_PDU_TYPE_IS_I(pdu) && 240 LLC_PDU_IS_RSP(pdu) && LLC_PDU_TYPE_IS_I(pdu) &&
@@ -244,7 +244,7 @@ int llc_conn_ev_rx_i_rsp_fbit_set_0(struct sock *sk, struct sk_buff *skb)
244 244
245int llc_conn_ev_rx_i_rsp_fbit_set_1(struct sock *sk, struct sk_buff *skb) 245int llc_conn_ev_rx_i_rsp_fbit_set_1(struct sock *sk, struct sk_buff *skb)
246{ 246{
247 struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); 247 const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
248 248
249 return LLC_PDU_IS_RSP(pdu) && LLC_PDU_TYPE_IS_I(pdu) && 249 return LLC_PDU_IS_RSP(pdu) && LLC_PDU_TYPE_IS_I(pdu) &&
250 LLC_I_PF_IS_1(pdu) && 250 LLC_I_PF_IS_1(pdu) &&
@@ -253,7 +253,7 @@ int llc_conn_ev_rx_i_rsp_fbit_set_1(struct sock *sk, struct sk_buff *skb)
253 253
254int llc_conn_ev_rx_i_rsp_fbit_set_x(struct sock *sk, struct sk_buff *skb) 254int llc_conn_ev_rx_i_rsp_fbit_set_x(struct sock *sk, struct sk_buff *skb)
255{ 255{
256 struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); 256 const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
257 257
258 return llc_conn_space(sk, skb) && 258 return llc_conn_space(sk, skb) &&
259 LLC_PDU_IS_RSP(pdu) && LLC_PDU_TYPE_IS_I(pdu) && 259 LLC_PDU_IS_RSP(pdu) && LLC_PDU_TYPE_IS_I(pdu) &&
@@ -263,9 +263,9 @@ int llc_conn_ev_rx_i_rsp_fbit_set_x(struct sock *sk, struct sk_buff *skb)
263int llc_conn_ev_rx_i_rsp_fbit_set_0_unexpd_ns(struct sock *sk, 263int llc_conn_ev_rx_i_rsp_fbit_set_0_unexpd_ns(struct sock *sk,
264 struct sk_buff *skb) 264 struct sk_buff *skb)
265{ 265{
266 struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); 266 const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
267 u8 vr = llc_sk(sk)->vR; 267 const u8 vr = llc_sk(sk)->vR;
268 u8 ns = LLC_I_GET_NS(pdu); 268 const u8 ns = LLC_I_GET_NS(pdu);
269 269
270 return LLC_PDU_IS_RSP(pdu) && LLC_PDU_TYPE_IS_I(pdu) && 270 return LLC_PDU_IS_RSP(pdu) && LLC_PDU_TYPE_IS_I(pdu) &&
271 LLC_I_PF_IS_0(pdu) && ns != vr && 271 LLC_I_PF_IS_0(pdu) && ns != vr &&
@@ -275,9 +275,9 @@ int llc_conn_ev_rx_i_rsp_fbit_set_0_unexpd_ns(struct sock *sk,
275int llc_conn_ev_rx_i_rsp_fbit_set_1_unexpd_ns(struct sock *sk, 275int llc_conn_ev_rx_i_rsp_fbit_set_1_unexpd_ns(struct sock *sk,
276 struct sk_buff *skb) 276 struct sk_buff *skb)
277{ 277{
278 struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); 278 const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
279 u8 vr = llc_sk(sk)->vR; 279 const u8 vr = llc_sk(sk)->vR;
280 u8 ns = LLC_I_GET_NS(pdu); 280 const u8 ns = LLC_I_GET_NS(pdu);
281 281
282 return LLC_PDU_IS_RSP(pdu) && LLC_PDU_TYPE_IS_I(pdu) && 282 return LLC_PDU_IS_RSP(pdu) && LLC_PDU_TYPE_IS_I(pdu) &&
283 LLC_I_PF_IS_1(pdu) && ns != vr && 283 LLC_I_PF_IS_1(pdu) && ns != vr &&
@@ -287,9 +287,9 @@ int llc_conn_ev_rx_i_rsp_fbit_set_1_unexpd_ns(struct sock *sk,
287int llc_conn_ev_rx_i_rsp_fbit_set_x_unexpd_ns(struct sock *sk, 287int llc_conn_ev_rx_i_rsp_fbit_set_x_unexpd_ns(struct sock *sk,
288 struct sk_buff *skb) 288 struct sk_buff *skb)
289{ 289{
290 struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); 290 const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
291 u8 vr = llc_sk(sk)->vR; 291 const u8 vr = llc_sk(sk)->vR;
292 u8 ns = LLC_I_GET_NS(pdu); 292 const u8 ns = LLC_I_GET_NS(pdu);
293 293
294 return LLC_PDU_IS_RSP(pdu) && LLC_PDU_TYPE_IS_I(pdu) && ns != vr && 294 return LLC_PDU_IS_RSP(pdu) && LLC_PDU_TYPE_IS_I(pdu) && ns != vr &&
295 !llc_util_ns_inside_rx_window(ns, vr, llc_sk(sk)->rw) ? 0 : 1; 295 !llc_util_ns_inside_rx_window(ns, vr, llc_sk(sk)->rw) ? 0 : 1;
@@ -298,10 +298,11 @@ int llc_conn_ev_rx_i_rsp_fbit_set_x_unexpd_ns(struct sock *sk,
298int llc_conn_ev_rx_i_rsp_fbit_set_x_inval_ns(struct sock *sk, 298int llc_conn_ev_rx_i_rsp_fbit_set_x_inval_ns(struct sock *sk,
299 struct sk_buff *skb) 299 struct sk_buff *skb)
300{ 300{
301 struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); 301 const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
302 u8 vr = llc_sk(sk)->vR; 302 const u8 vr = llc_sk(sk)->vR;
303 u8 ns = LLC_I_GET_NS(pdu); 303 const u8 ns = LLC_I_GET_NS(pdu);
304 u16 rc = LLC_PDU_IS_RSP(pdu) && LLC_PDU_TYPE_IS_I(pdu) && ns != vr && 304 const u16 rc = LLC_PDU_IS_RSP(pdu) && LLC_PDU_TYPE_IS_I(pdu) &&
305 ns != vr &&
305 llc_util_ns_inside_rx_window(ns, vr, llc_sk(sk)->rw) ? 0 : 1; 306 llc_util_ns_inside_rx_window(ns, vr, llc_sk(sk)->rw) ? 0 : 1;
306 if (!rc) 307 if (!rc)
307 dprintk("%s: matched, state=%d, ns=%d, vr=%d\n", 308 dprintk("%s: matched, state=%d, ns=%d, vr=%d\n",
@@ -311,7 +312,7 @@ int llc_conn_ev_rx_i_rsp_fbit_set_x_inval_ns(struct sock *sk,
311 312
312int llc_conn_ev_rx_rej_cmd_pbit_set_0(struct sock *sk, struct sk_buff *skb) 313int llc_conn_ev_rx_rej_cmd_pbit_set_0(struct sock *sk, struct sk_buff *skb)
313{ 314{
314 struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); 315 const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
315 316
316 return LLC_PDU_IS_CMD(pdu) && LLC_PDU_TYPE_IS_S(pdu) && 317 return LLC_PDU_IS_CMD(pdu) && LLC_PDU_TYPE_IS_S(pdu) &&
317 LLC_S_PF_IS_0(pdu) && 318 LLC_S_PF_IS_0(pdu) &&
@@ -320,7 +321,7 @@ int llc_conn_ev_rx_rej_cmd_pbit_set_0(struct sock *sk, struct sk_buff *skb)
320 321
321int llc_conn_ev_rx_rej_cmd_pbit_set_1(struct sock *sk, struct sk_buff *skb) 322int llc_conn_ev_rx_rej_cmd_pbit_set_1(struct sock *sk, struct sk_buff *skb)
322{ 323{
323 struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); 324 const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
324 325
325 return LLC_PDU_IS_CMD(pdu) && LLC_PDU_TYPE_IS_S(pdu) && 326 return LLC_PDU_IS_CMD(pdu) && LLC_PDU_TYPE_IS_S(pdu) &&
326 LLC_S_PF_IS_1(pdu) && 327 LLC_S_PF_IS_1(pdu) &&
@@ -329,7 +330,7 @@ int llc_conn_ev_rx_rej_cmd_pbit_set_1(struct sock *sk, struct sk_buff *skb)
329 330
330int llc_conn_ev_rx_rej_rsp_fbit_set_0(struct sock *sk, struct sk_buff *skb) 331int llc_conn_ev_rx_rej_rsp_fbit_set_0(struct sock *sk, struct sk_buff *skb)
331{ 332{
332 struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); 333 const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
333 334
334 return LLC_PDU_IS_RSP(pdu) && LLC_PDU_TYPE_IS_S(pdu) && 335 return LLC_PDU_IS_RSP(pdu) && LLC_PDU_TYPE_IS_S(pdu) &&
335 LLC_S_PF_IS_0(pdu) && 336 LLC_S_PF_IS_0(pdu) &&
@@ -338,7 +339,7 @@ int llc_conn_ev_rx_rej_rsp_fbit_set_0(struct sock *sk, struct sk_buff *skb)
338 339
339int llc_conn_ev_rx_rej_rsp_fbit_set_1(struct sock *sk, struct sk_buff *skb) 340int llc_conn_ev_rx_rej_rsp_fbit_set_1(struct sock *sk, struct sk_buff *skb)
340{ 341{
341 struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); 342 const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
342 343
343 return LLC_PDU_IS_RSP(pdu) && LLC_PDU_TYPE_IS_S(pdu) && 344 return LLC_PDU_IS_RSP(pdu) && LLC_PDU_TYPE_IS_S(pdu) &&
344 LLC_S_PF_IS_1(pdu) && 345 LLC_S_PF_IS_1(pdu) &&
@@ -347,7 +348,7 @@ int llc_conn_ev_rx_rej_rsp_fbit_set_1(struct sock *sk, struct sk_buff *skb)
347 348
348int llc_conn_ev_rx_rej_rsp_fbit_set_x(struct sock *sk, struct sk_buff *skb) 349int llc_conn_ev_rx_rej_rsp_fbit_set_x(struct sock *sk, struct sk_buff *skb)
349{ 350{
350 struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb); 351 const struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb);
351 352
352 return LLC_PDU_IS_RSP(pdu) && LLC_PDU_TYPE_IS_S(pdu) && 353 return LLC_PDU_IS_RSP(pdu) && LLC_PDU_TYPE_IS_S(pdu) &&
353 LLC_S_PDU_RSP(pdu) == LLC_2_PDU_RSP_REJ ? 0 : 1; 354 LLC_S_PDU_RSP(pdu) == LLC_2_PDU_RSP_REJ ? 0 : 1;
@@ -355,7 +356,7 @@ int llc_conn_ev_rx_rej_rsp_fbit_set_x(struct sock *sk, struct sk_buff *skb)
355 356
356int llc_conn_ev_rx_rnr_cmd_pbit_set_0(struct sock *sk, struct sk_buff *skb) 357int llc_conn_ev_rx_rnr_cmd_pbit_set_0(struct sock *sk, struct sk_buff *skb)
357{ 358{
358 struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); 359 const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
359 360
360 return LLC_PDU_IS_CMD(pdu) && LLC_PDU_TYPE_IS_S(pdu) && 361 return LLC_PDU_IS_CMD(pdu) && LLC_PDU_TYPE_IS_S(pdu) &&
361 LLC_S_PF_IS_0(pdu) && 362 LLC_S_PF_IS_0(pdu) &&
@@ -364,7 +365,7 @@ int llc_conn_ev_rx_rnr_cmd_pbit_set_0(struct sock *sk, struct sk_buff *skb)
364 365
365int llc_conn_ev_rx_rnr_cmd_pbit_set_1(struct sock *sk, struct sk_buff *skb) 366int llc_conn_ev_rx_rnr_cmd_pbit_set_1(struct sock *sk, struct sk_buff *skb)
366{ 367{
367 struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); 368 const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
368 369
369 return LLC_PDU_IS_CMD(pdu) && LLC_PDU_TYPE_IS_S(pdu) && 370 return LLC_PDU_IS_CMD(pdu) && LLC_PDU_TYPE_IS_S(pdu) &&
370 LLC_S_PF_IS_1(pdu) && 371 LLC_S_PF_IS_1(pdu) &&
@@ -373,7 +374,7 @@ int llc_conn_ev_rx_rnr_cmd_pbit_set_1(struct sock *sk, struct sk_buff *skb)
373 374
374int llc_conn_ev_rx_rnr_rsp_fbit_set_0(struct sock *sk, struct sk_buff *skb) 375int llc_conn_ev_rx_rnr_rsp_fbit_set_0(struct sock *sk, struct sk_buff *skb)
375{ 376{
376 struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); 377 const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
377 378
378 return LLC_PDU_IS_RSP(pdu) && LLC_PDU_TYPE_IS_S(pdu) && 379 return LLC_PDU_IS_RSP(pdu) && LLC_PDU_TYPE_IS_S(pdu) &&
379 LLC_S_PF_IS_0(pdu) && 380 LLC_S_PF_IS_0(pdu) &&
@@ -382,7 +383,7 @@ int llc_conn_ev_rx_rnr_rsp_fbit_set_0(struct sock *sk, struct sk_buff *skb)
382 383
383int llc_conn_ev_rx_rnr_rsp_fbit_set_1(struct sock *sk, struct sk_buff *skb) 384int llc_conn_ev_rx_rnr_rsp_fbit_set_1(struct sock *sk, struct sk_buff *skb)
384{ 385{
385 struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); 386 const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
386 387
387 return LLC_PDU_IS_RSP(pdu) && LLC_PDU_TYPE_IS_S(pdu) && 388 return LLC_PDU_IS_RSP(pdu) && LLC_PDU_TYPE_IS_S(pdu) &&
388 LLC_S_PF_IS_1(pdu) && 389 LLC_S_PF_IS_1(pdu) &&
@@ -391,7 +392,7 @@ int llc_conn_ev_rx_rnr_rsp_fbit_set_1(struct sock *sk, struct sk_buff *skb)
391 392
392int llc_conn_ev_rx_rr_cmd_pbit_set_0(struct sock *sk, struct sk_buff *skb) 393int llc_conn_ev_rx_rr_cmd_pbit_set_0(struct sock *sk, struct sk_buff *skb)
393{ 394{
394 struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); 395 const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
395 396
396 return LLC_PDU_IS_CMD(pdu) && LLC_PDU_TYPE_IS_S(pdu) && 397 return LLC_PDU_IS_CMD(pdu) && LLC_PDU_TYPE_IS_S(pdu) &&
397 LLC_S_PF_IS_0(pdu) && 398 LLC_S_PF_IS_0(pdu) &&
@@ -400,7 +401,7 @@ int llc_conn_ev_rx_rr_cmd_pbit_set_0(struct sock *sk, struct sk_buff *skb)
400 401
401int llc_conn_ev_rx_rr_cmd_pbit_set_1(struct sock *sk, struct sk_buff *skb) 402int llc_conn_ev_rx_rr_cmd_pbit_set_1(struct sock *sk, struct sk_buff *skb)
402{ 403{
403 struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); 404 const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
404 405
405 return LLC_PDU_IS_CMD(pdu) && LLC_PDU_TYPE_IS_S(pdu) && 406 return LLC_PDU_IS_CMD(pdu) && LLC_PDU_TYPE_IS_S(pdu) &&
406 LLC_S_PF_IS_1(pdu) && 407 LLC_S_PF_IS_1(pdu) &&
@@ -409,7 +410,7 @@ int llc_conn_ev_rx_rr_cmd_pbit_set_1(struct sock *sk, struct sk_buff *skb)
409 410
410int llc_conn_ev_rx_rr_rsp_fbit_set_0(struct sock *sk, struct sk_buff *skb) 411int llc_conn_ev_rx_rr_rsp_fbit_set_0(struct sock *sk, struct sk_buff *skb)
411{ 412{
412 struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); 413 const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
413 414
414 return llc_conn_space(sk, skb) && 415 return llc_conn_space(sk, skb) &&
415 LLC_PDU_IS_RSP(pdu) && LLC_PDU_TYPE_IS_S(pdu) && 416 LLC_PDU_IS_RSP(pdu) && LLC_PDU_TYPE_IS_S(pdu) &&
@@ -419,7 +420,7 @@ int llc_conn_ev_rx_rr_rsp_fbit_set_0(struct sock *sk, struct sk_buff *skb)
419 420
420int llc_conn_ev_rx_rr_rsp_fbit_set_1(struct sock *sk, struct sk_buff *skb) 421int llc_conn_ev_rx_rr_rsp_fbit_set_1(struct sock *sk, struct sk_buff *skb)
421{ 422{
422 struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); 423 const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
423 424
424 return llc_conn_space(sk, skb) && 425 return llc_conn_space(sk, skb) &&
425 LLC_PDU_IS_RSP(pdu) && LLC_PDU_TYPE_IS_S(pdu) && 426 LLC_PDU_IS_RSP(pdu) && LLC_PDU_TYPE_IS_S(pdu) &&
@@ -429,7 +430,7 @@ int llc_conn_ev_rx_rr_rsp_fbit_set_1(struct sock *sk, struct sk_buff *skb)
429 430
430int llc_conn_ev_rx_sabme_cmd_pbit_set_x(struct sock *sk, struct sk_buff *skb) 431int llc_conn_ev_rx_sabme_cmd_pbit_set_x(struct sock *sk, struct sk_buff *skb)
431{ 432{
432 struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb); 433 const struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb);
433 434
434 return LLC_PDU_IS_CMD(pdu) && LLC_PDU_TYPE_IS_U(pdu) && 435 return LLC_PDU_IS_CMD(pdu) && LLC_PDU_TYPE_IS_U(pdu) &&
435 LLC_U_PDU_CMD(pdu) == LLC_2_PDU_CMD_SABME ? 0 : 1; 436 LLC_U_PDU_CMD(pdu) == LLC_2_PDU_CMD_SABME ? 0 : 1;
@@ -446,7 +447,7 @@ int llc_conn_ev_rx_ua_rsp_fbit_set_x(struct sock *sk, struct sk_buff *skb)
446int llc_conn_ev_rx_xxx_cmd_pbit_set_1(struct sock *sk, struct sk_buff *skb) 447int llc_conn_ev_rx_xxx_cmd_pbit_set_1(struct sock *sk, struct sk_buff *skb)
447{ 448{
448 u16 rc = 1; 449 u16 rc = 1;
449 struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); 450 const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
450 451
451 if (LLC_PDU_IS_CMD(pdu)) { 452 if (LLC_PDU_IS_CMD(pdu)) {
452 if (LLC_PDU_TYPE_IS_I(pdu) || LLC_PDU_TYPE_IS_S(pdu)) { 453 if (LLC_PDU_TYPE_IS_I(pdu) || LLC_PDU_TYPE_IS_S(pdu)) {
@@ -461,7 +462,7 @@ int llc_conn_ev_rx_xxx_cmd_pbit_set_1(struct sock *sk, struct sk_buff *skb)
461int llc_conn_ev_rx_xxx_cmd_pbit_set_x(struct sock *sk, struct sk_buff *skb) 462int llc_conn_ev_rx_xxx_cmd_pbit_set_x(struct sock *sk, struct sk_buff *skb)
462{ 463{
463 u16 rc = 1; 464 u16 rc = 1;
464 struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb); 465 const struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb);
465 466
466 if (LLC_PDU_IS_CMD(pdu)) { 467 if (LLC_PDU_IS_CMD(pdu)) {
467 if (LLC_PDU_TYPE_IS_I(pdu) || LLC_PDU_TYPE_IS_S(pdu)) 468 if (LLC_PDU_TYPE_IS_I(pdu) || LLC_PDU_TYPE_IS_S(pdu))
@@ -477,32 +478,10 @@ int llc_conn_ev_rx_xxx_cmd_pbit_set_x(struct sock *sk, struct sk_buff *skb)
477 return rc; 478 return rc;
478} 479}
479 480
480int llc_conn_ev_rx_xxx_rsp_fbit_set_1(struct sock *sk, struct sk_buff *skb)
481{
482 u16 rc = 1;
483 struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
484
485 if (LLC_PDU_IS_RSP(pdu)) {
486 if (LLC_PDU_TYPE_IS_I(pdu) || LLC_PDU_TYPE_IS_S(pdu)) {
487 if (LLC_I_PF_IS_1(pdu))
488 rc = 0;
489 } else if (LLC_PDU_TYPE_IS_U(pdu))
490 switch (LLC_U_PDU_RSP(pdu)) {
491 case LLC_2_PDU_RSP_UA:
492 case LLC_2_PDU_RSP_DM:
493 case LLC_2_PDU_RSP_FRMR:
494 if (LLC_U_PF_IS_1(pdu))
495 rc = 0;
496 break;
497 }
498 }
499 return rc;
500}
501
502int llc_conn_ev_rx_xxx_rsp_fbit_set_x(struct sock *sk, struct sk_buff *skb) 481int llc_conn_ev_rx_xxx_rsp_fbit_set_x(struct sock *sk, struct sk_buff *skb)
503{ 482{
504 u16 rc = 1; 483 u16 rc = 1;
505 struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb); 484 const struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb);
506 485
507 if (LLC_PDU_IS_RSP(pdu)) { 486 if (LLC_PDU_IS_RSP(pdu)) {
508 if (LLC_PDU_TYPE_IS_I(pdu) || LLC_PDU_TYPE_IS_S(pdu)) 487 if (LLC_PDU_TYPE_IS_I(pdu) || LLC_PDU_TYPE_IS_S(pdu))
@@ -524,9 +503,9 @@ int llc_conn_ev_rx_zzz_cmd_pbit_set_x_inval_nr(struct sock *sk,
524 struct sk_buff *skb) 503 struct sk_buff *skb)
525{ 504{
526 u16 rc = 1; 505 u16 rc = 1;
527 struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); 506 const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
528 u8 vs = llc_sk(sk)->vS; 507 const u8 vs = llc_sk(sk)->vS;
529 u8 nr = LLC_I_GET_NR(pdu); 508 const u8 nr = LLC_I_GET_NR(pdu);
530 509
531 if (LLC_PDU_IS_CMD(pdu) && 510 if (LLC_PDU_IS_CMD(pdu) &&
532 (LLC_PDU_TYPE_IS_I(pdu) || LLC_PDU_TYPE_IS_S(pdu)) && 511 (LLC_PDU_TYPE_IS_I(pdu) || LLC_PDU_TYPE_IS_S(pdu)) &&
@@ -542,9 +521,9 @@ int llc_conn_ev_rx_zzz_rsp_fbit_set_x_inval_nr(struct sock *sk,
542 struct sk_buff *skb) 521 struct sk_buff *skb)
543{ 522{
544 u16 rc = 1; 523 u16 rc = 1;
545 struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); 524 const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
546 u8 vs = llc_sk(sk)->vS; 525 const u8 vs = llc_sk(sk)->vS;
547 u8 nr = LLC_I_GET_NR(pdu); 526 const u8 nr = LLC_I_GET_NR(pdu);
548 527
549 if (LLC_PDU_IS_RSP(pdu) && 528 if (LLC_PDU_IS_RSP(pdu) &&
550 (LLC_PDU_TYPE_IS_I(pdu) || LLC_PDU_TYPE_IS_S(pdu)) && 529 (LLC_PDU_TYPE_IS_I(pdu) || LLC_PDU_TYPE_IS_S(pdu)) &&
@@ -563,28 +542,28 @@ int llc_conn_ev_rx_any_frame(struct sock *sk, struct sk_buff *skb)
563 542
564int llc_conn_ev_p_tmr_exp(struct sock *sk, struct sk_buff *skb) 543int llc_conn_ev_p_tmr_exp(struct sock *sk, struct sk_buff *skb)
565{ 544{
566 struct llc_conn_state_ev *ev = llc_conn_ev(skb); 545 const struct llc_conn_state_ev *ev = llc_conn_ev(skb);
567 546
568 return ev->type != LLC_CONN_EV_TYPE_P_TMR; 547 return ev->type != LLC_CONN_EV_TYPE_P_TMR;
569} 548}
570 549
571int llc_conn_ev_ack_tmr_exp(struct sock *sk, struct sk_buff *skb) 550int llc_conn_ev_ack_tmr_exp(struct sock *sk, struct sk_buff *skb)
572{ 551{
573 struct llc_conn_state_ev *ev = llc_conn_ev(skb); 552 const struct llc_conn_state_ev *ev = llc_conn_ev(skb);
574 553
575 return ev->type != LLC_CONN_EV_TYPE_ACK_TMR; 554 return ev->type != LLC_CONN_EV_TYPE_ACK_TMR;
576} 555}
577 556
578int llc_conn_ev_rej_tmr_exp(struct sock *sk, struct sk_buff *skb) 557int llc_conn_ev_rej_tmr_exp(struct sock *sk, struct sk_buff *skb)
579{ 558{
580 struct llc_conn_state_ev *ev = llc_conn_ev(skb); 559 const struct llc_conn_state_ev *ev = llc_conn_ev(skb);
581 560
582 return ev->type != LLC_CONN_EV_TYPE_REJ_TMR; 561 return ev->type != LLC_CONN_EV_TYPE_REJ_TMR;
583} 562}
584 563
585int llc_conn_ev_busy_tmr_exp(struct sock *sk, struct sk_buff *skb) 564int llc_conn_ev_busy_tmr_exp(struct sock *sk, struct sk_buff *skb)
586{ 565{
587 struct llc_conn_state_ev *ev = llc_conn_ev(skb); 566 const struct llc_conn_state_ev *ev = llc_conn_ev(skb);
588 567
589 return ev->type != LLC_CONN_EV_TYPE_BUSY_TMR; 568 return ev->type != LLC_CONN_EV_TYPE_BUSY_TMR;
590} 569}
@@ -596,7 +575,7 @@ int llc_conn_ev_init_p_f_cycle(struct sock *sk, struct sk_buff *skb)
596 575
597int llc_conn_ev_tx_buffer_full(struct sock *sk, struct sk_buff *skb) 576int llc_conn_ev_tx_buffer_full(struct sock *sk, struct sk_buff *skb)
598{ 577{
599 struct llc_conn_state_ev *ev = llc_conn_ev(skb); 578 const struct llc_conn_state_ev *ev = llc_conn_ev(skb);
600 579
601 return ev->type == LLC_CONN_EV_TYPE_SIMPLE && 580 return ev->type == LLC_CONN_EV_TYPE_SIMPLE &&
602 ev->prim_type == LLC_CONN_EV_TX_BUFF_FULL ? 0 : 1; 581 ev->prim_type == LLC_CONN_EV_TX_BUFF_FULL ? 0 : 1;
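
Editor's note: the llc_c_ev.c changes are largely mechanical: each event qualifier only reads the decoded PDU header or event, so the local pointers and temporaries gain const. A tiny sketch of the idiom follows, using a stand-in header type rather than the real LLC structures.

#include <stdint.h>
#include <stdio.h>

struct pdu_model {
        uint8_t ctrl_1;                    /* stand-in for the LLC control field */
};

/* The qualifier only reads the header, so the parameter can be const. */
static int pdu_is_cmd(const struct pdu_model *pdu)
{
        return (pdu->ctrl_1 & 0x01) == 0;  /* the bit meaning here is illustrative */
}

int main(void)
{
        const struct pdu_model pdu = { .ctrl_1 = 0x00 };

        printf("is_cmd=%d\n", pdu_is_cmd(&pdu));
        return 0;
}
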
diff --git a/net/llc/llc_conn.c b/net/llc/llc_conn.c
index 4c644bc70eae..042b24a8ca4c 100644
--- a/net/llc/llc_conn.c
+++ b/net/llc/llc_conn.c
@@ -40,6 +40,11 @@ static struct llc_conn_state_trans *llc_qualify_conn_ev(struct sock *sk,
40/* Offset table on connection states transition diagram */ 40/* Offset table on connection states transition diagram */
41static int llc_offset_table[NBR_CONN_STATES][NBR_CONN_EV]; 41static int llc_offset_table[NBR_CONN_STATES][NBR_CONN_EV];
42 42
43int sysctl_llc2_ack_timeout = LLC2_ACK_TIME * HZ;
44int sysctl_llc2_p_timeout = LLC2_P_TIME * HZ;
45int sysctl_llc2_rej_timeout = LLC2_REJ_TIME * HZ;
46int sysctl_llc2_busy_timeout = LLC2_BUSY_TIME * HZ;
47
43/** 48/**
44 * llc_conn_state_process - sends event to connection state machine 49 * llc_conn_state_process - sends event to connection state machine
45 * @sk: connection 50 * @sk: connection
@@ -53,7 +58,7 @@ static int llc_offset_table[NBR_CONN_STATES][NBR_CONN_EV];
53int llc_conn_state_process(struct sock *sk, struct sk_buff *skb) 58int llc_conn_state_process(struct sock *sk, struct sk_buff *skb)
54{ 59{
55 int rc; 60 int rc;
56 struct llc_sock *llc = llc_sk(sk); 61 struct llc_sock *llc = llc_sk(skb->sk);
57 struct llc_conn_state_ev *ev = llc_conn_ev(skb); 62 struct llc_conn_state_ev *ev = llc_conn_ev(skb);
58 63
59 /* 64 /*
@@ -63,13 +68,16 @@ int llc_conn_state_process(struct sock *sk, struct sk_buff *skb)
63 */ 68 */
64 skb_get(skb); 69 skb_get(skb);
65 ev->ind_prim = ev->cfm_prim = 0; 70 ev->ind_prim = ev->cfm_prim = 0;
66 rc = llc_conn_service(sk, skb); /* sending event to state machine */ 71 /*
67 if (rc) { 72 * Send event to state machine
73 */
74 rc = llc_conn_service(skb->sk, skb);
75 if (unlikely(rc != 0)) {
68 printk(KERN_ERR "%s: llc_conn_service failed\n", __FUNCTION__); 76 printk(KERN_ERR "%s: llc_conn_service failed\n", __FUNCTION__);
69 goto out_kfree_skb; 77 goto out_kfree_skb;
70 } 78 }
71 79
72 if (!ev->ind_prim && !ev->cfm_prim) { 80 if (unlikely(!ev->ind_prim && !ev->cfm_prim)) {
73 /* indicate or confirm not required */ 81 /* indicate or confirm not required */
74 /* XXX this is not very pretty, perhaps we should store 82 /* XXX this is not very pretty, perhaps we should store
75 * XXX indicate/confirm-needed state in the llc_conn_state_ev 83 * XXX indicate/confirm-needed state in the llc_conn_state_ev
@@ -80,13 +88,13 @@ int llc_conn_state_process(struct sock *sk, struct sk_buff *skb)
80 goto out_skb_put; 88 goto out_skb_put;
81 } 89 }
82 90
83 if (ev->ind_prim && ev->cfm_prim) /* Paranoia */ 91 if (unlikely(ev->ind_prim && ev->cfm_prim)) /* Paranoia */
84 skb_get(skb); 92 skb_get(skb);
85 93
86 switch (ev->ind_prim) { 94 switch (ev->ind_prim) {
87 case LLC_DATA_PRIM: 95 case LLC_DATA_PRIM:
88 llc_save_primitive(skb, LLC_DATA_PRIM); 96 llc_save_primitive(sk, skb, LLC_DATA_PRIM);
89 if (sock_queue_rcv_skb(sk, skb)) { 97 if (unlikely(sock_queue_rcv_skb(sk, skb))) {
90 /* 98 /*
91 * shouldn't happen 99 * shouldn't happen
92 */ 100 */
@@ -95,13 +103,14 @@ int llc_conn_state_process(struct sock *sk, struct sk_buff *skb)
95 kfree_skb(skb); 103 kfree_skb(skb);
96 } 104 }
97 break; 105 break;
98 case LLC_CONN_PRIM: { 106 case LLC_CONN_PRIM:
99 struct sock *parent = skb->sk; 107 /*
100 108 * Can't be sock_queue_rcv_skb, because we have to leave the
101 skb->sk = sk; 109 * skb->sk pointing to the newly created struct sock in
102 skb_queue_tail(&parent->sk_receive_queue, skb); 110 * llc_conn_handler. -acme
103 sk->sk_state_change(parent); 111 */
104 } 112 skb_queue_tail(&sk->sk_receive_queue, skb);
113 sk->sk_state_change(sk);
105 break; 114 break;
106 case LLC_DISC_PRIM: 115 case LLC_DISC_PRIM:
107 sock_hold(sk); 116 sock_hold(sk);
@@ -111,8 +120,8 @@ int llc_conn_state_process(struct sock *sk, struct sk_buff *skb)
111 sk->sk_socket->state = SS_UNCONNECTED; 120 sk->sk_socket->state = SS_UNCONNECTED;
112 sk->sk_state = TCP_CLOSE; 121 sk->sk_state = TCP_CLOSE;
113 if (!sock_flag(sk, SOCK_DEAD)) { 122 if (!sock_flag(sk, SOCK_DEAD)) {
114 sk->sk_state_change(sk);
115 sock_set_flag(sk, SOCK_DEAD); 123 sock_set_flag(sk, SOCK_DEAD);
124 sk->sk_state_change(sk);
116 } 125 }
117 } 126 }
118 kfree_skb(skb); 127 kfree_skb(skb);
@@ -465,7 +474,7 @@ static int llc_exec_conn_trans_actions(struct sock *sk,
465} 474}
466 475
467/** 476/**
468 * llc_lookup_established - Finds connection for the remote/local sap/mac 477 * __llc_lookup_established - Finds connection for the remote/local sap/mac
469 * @sap: SAP 478 * @sap: SAP
470 * @daddr: address of remote LLC (MAC + SAP) 479 * @daddr: address of remote LLC (MAC + SAP)
471 * @laddr: address of local LLC (MAC + SAP) 480 * @laddr: address of local LLC (MAC + SAP)
@@ -473,14 +482,16 @@ static int llc_exec_conn_trans_actions(struct sock *sk,
473 * Search connection list of the SAP and finds connection using the remote 482 * Search connection list of the SAP and finds connection using the remote
474 * mac, remote sap, local mac, and local sap. Returns pointer for 483 * mac, remote sap, local mac, and local sap. Returns pointer for
475 * connection found, %NULL otherwise. 484 * connection found, %NULL otherwise.
485 * Caller has to make sure local_bh is disabled.
476 */ 486 */
477struct sock *llc_lookup_established(struct llc_sap *sap, struct llc_addr *daddr, 487static struct sock *__llc_lookup_established(struct llc_sap *sap,
478 struct llc_addr *laddr) 488 struct llc_addr *daddr,
489 struct llc_addr *laddr)
479{ 490{
480 struct sock *rc; 491 struct sock *rc;
481 struct hlist_node *node; 492 struct hlist_node *node;
482 493
483 read_lock_bh(&sap->sk_list.lock); 494 read_lock(&sap->sk_list.lock);
484 sk_for_each(rc, node, &sap->sk_list.list) { 495 sk_for_each(rc, node, &sap->sk_list.list) {
485 struct llc_sock *llc = llc_sk(rc); 496 struct llc_sock *llc = llc_sk(rc);
486 497
@@ -494,10 +505,22 @@ struct sock *llc_lookup_established(struct llc_sap *sap, struct llc_addr *daddr,
494 } 505 }
495 rc = NULL; 506 rc = NULL;
496found: 507found:
497 read_unlock_bh(&sap->sk_list.lock); 508 read_unlock(&sap->sk_list.lock);
498 return rc; 509 return rc;
499} 510}
500 511
512struct sock *llc_lookup_established(struct llc_sap *sap,
513 struct llc_addr *daddr,
514 struct llc_addr *laddr)
515{
516 struct sock *sk;
517
518 local_bh_disable();
519 sk = __llc_lookup_established(sap, daddr, laddr);
520 local_bh_enable();
521 return sk;
522}
523
501/** 524/**
502 * llc_lookup_listener - Finds listener for local MAC + SAP 525 * llc_lookup_listener - Finds listener for local MAC + SAP
503 * @sap: SAP 526 * @sap: SAP
@@ -506,6 +529,7 @@ found:
506 * Search connection list of the SAP and finds connection listening on 529 * Search connection list of the SAP and finds connection listening on
507 * local mac, and local sap. Returns pointer for parent socket found, 530 * local mac, and local sap. Returns pointer for parent socket found,
508 * %NULL otherwise. 531 * %NULL otherwise.
532 * Caller has to make sure local_bh is disabled.
509 */ 533 */
510static struct sock *llc_lookup_listener(struct llc_sap *sap, 534static struct sock *llc_lookup_listener(struct llc_sap *sap,
511 struct llc_addr *laddr) 535 struct llc_addr *laddr)
@@ -513,7 +537,7 @@ static struct sock *llc_lookup_listener(struct llc_sap *sap,
513 struct sock *rc; 537 struct sock *rc;
514 struct hlist_node *node; 538 struct hlist_node *node;
515 539
516 read_lock_bh(&sap->sk_list.lock); 540 read_lock(&sap->sk_list.lock);
517 sk_for_each(rc, node, &sap->sk_list.list) { 541 sk_for_each(rc, node, &sap->sk_list.list) {
518 struct llc_sock *llc = llc_sk(rc); 542 struct llc_sock *llc = llc_sk(rc);
519 543
@@ -527,10 +551,19 @@ static struct sock *llc_lookup_listener(struct llc_sap *sap,
527 } 551 }
528 rc = NULL; 552 rc = NULL;
529found: 553found:
530 read_unlock_bh(&sap->sk_list.lock); 554 read_unlock(&sap->sk_list.lock);
531 return rc; 555 return rc;
532} 556}
533 557
558static struct sock *__llc_lookup(struct llc_sap *sap,
559 struct llc_addr *daddr,
560 struct llc_addr *laddr)
561{
562 struct sock *sk = __llc_lookup_established(sap, daddr, laddr);
563
564 return sk ? : llc_lookup_listener(sap, laddr);
565}
566
534/** 567/**
535 * llc_data_accept_state - designates if in this state data can be sent. 568 * llc_data_accept_state - designates if in this state data can be sent.
536 * @state: state of connection. 569 * @state: state of connection.
@@ -544,14 +577,14 @@ u8 llc_data_accept_state(u8 state)
544} 577}
545 578
546/** 579/**
547 * find_next_offset - finds offset for next category of transitions 580 * llc_find_next_offset - finds offset for next category of transitions
548 * @state: state table. 581 * @state: state table.
549 * @offset: start offset. 582 * @offset: start offset.
550 * 583 *
551 * Finds offset of next category of transitions in transition table. 584 * Finds offset of next category of transitions in transition table.
552 * Returns the start index of next category. 585 * Returns the start index of next category.
553 */ 586 */
554static u16 find_next_offset(struct llc_conn_state *state, u16 offset) 587static u16 __init llc_find_next_offset(struct llc_conn_state *state, u16 offset)
555{ 588{
556 u16 cnt = 0; 589 u16 cnt = 0;
557 struct llc_conn_state_trans **next_trans; 590 struct llc_conn_state_trans **next_trans;
@@ -578,8 +611,8 @@ void __init llc_build_offset_table(void)
578 next_offset = 0; 611 next_offset = 0;
579 for (ev_type = 0; ev_type < NBR_CONN_EV; ev_type++) { 612 for (ev_type = 0; ev_type < NBR_CONN_EV; ev_type++) {
580 llc_offset_table[state][ev_type] = next_offset; 613 llc_offset_table[state][ev_type] = next_offset;
581 next_offset += find_next_offset(curr_state, 614 next_offset += llc_find_next_offset(curr_state,
582 next_offset) + 1; 615 next_offset) + 1;
583 } 616 }
584 } 617 }
585} 618}
@@ -623,6 +656,7 @@ static int llc_find_offset(int state, int ev_type)
623 */ 656 */
624void llc_sap_add_socket(struct llc_sap *sap, struct sock *sk) 657void llc_sap_add_socket(struct llc_sap *sap, struct sock *sk)
625{ 658{
659 llc_sap_hold(sap);
626 write_lock_bh(&sap->sk_list.lock); 660 write_lock_bh(&sap->sk_list.lock);
627 llc_sk(sk)->sap = sap; 661 llc_sk(sk)->sap = sap;
628 sk_add_node(sk, &sap->sk_list.list); 662 sk_add_node(sk, &sap->sk_list.list);
@@ -642,6 +676,7 @@ void llc_sap_remove_socket(struct llc_sap *sap, struct sock *sk)
642 write_lock_bh(&sap->sk_list.lock); 676 write_lock_bh(&sap->sk_list.lock);
643 sk_del_node_init(sk); 677 sk_del_node_init(sk);
644 write_unlock_bh(&sap->sk_list.lock); 678 write_unlock_bh(&sap->sk_list.lock);
679 llc_sap_put(sap);
645} 680}
646 681
647/** 682/**
@@ -654,15 +689,34 @@ void llc_sap_remove_socket(struct llc_sap *sap, struct sock *sk)
654static int llc_conn_rcv(struct sock* sk, struct sk_buff *skb) 689static int llc_conn_rcv(struct sock* sk, struct sk_buff *skb)
655{ 690{
656 struct llc_conn_state_ev *ev = llc_conn_ev(skb); 691 struct llc_conn_state_ev *ev = llc_conn_ev(skb);
657 struct llc_sock *llc = llc_sk(sk);
658 692
659 if (!llc->dev)
660 llc->dev = skb->dev;
661 ev->type = LLC_CONN_EV_TYPE_PDU; 693 ev->type = LLC_CONN_EV_TYPE_PDU;
662 ev->reason = 0; 694 ev->reason = 0;
663 return llc_conn_state_process(sk, skb); 695 return llc_conn_state_process(sk, skb);
664} 696}
665 697
698static struct sock *llc_create_incoming_sock(struct sock *sk,
699 struct net_device *dev,
700 struct llc_addr *saddr,
701 struct llc_addr *daddr)
702{
703 struct sock *newsk = llc_sk_alloc(sk->sk_family, GFP_ATOMIC,
704 sk->sk_prot);
705 struct llc_sock *newllc, *llc = llc_sk(sk);
706
707 if (!newsk)
708 goto out;
709 newllc = llc_sk(newsk);
710 memcpy(&newllc->laddr, daddr, sizeof(newllc->laddr));
711 memcpy(&newllc->daddr, saddr, sizeof(newllc->daddr));
712 newllc->dev = dev;
713 dev_hold(dev);
714 llc_sap_add_socket(llc->sap, newsk);
715 llc_sap_hold(llc->sap);
716out:
717 return newsk;
718}
719
666void llc_conn_handler(struct llc_sap *sap, struct sk_buff *skb) 720void llc_conn_handler(struct llc_sap *sap, struct sk_buff *skb)
667{ 721{
668 struct llc_addr saddr, daddr; 722 struct llc_addr saddr, daddr;
@@ -673,35 +727,35 @@ void llc_conn_handler(struct llc_sap *sap, struct sk_buff *skb)
673 llc_pdu_decode_da(skb, daddr.mac); 727 llc_pdu_decode_da(skb, daddr.mac);
674 llc_pdu_decode_dsap(skb, &daddr.lsap); 728 llc_pdu_decode_dsap(skb, &daddr.lsap);
675 729
676 sk = llc_lookup_established(sap, &saddr, &daddr); 730 sk = __llc_lookup(sap, &saddr, &daddr);
677 if (!sk) { 731 if (!sk)
732 goto drop;
733
734 bh_lock_sock(sk);
735 /*
736 * This has to be done here and not at the upper layer ->accept
737 * method because of the way the PROCOM state machine works:
738 * it needs to set several state variables (see, for instance,
739 * llc_adm_actions_2 in net/llc/llc_c_st.c) and send a packet to
740 * the originator of the new connection, and this state has to be
741 * in the newly created struct sock private area. -acme
742 */
743 if (unlikely(sk->sk_state == TCP_LISTEN)) {
744 struct sock *newsk = llc_create_incoming_sock(sk, skb->dev,
745 &saddr, &daddr);
746 if (!newsk)
747 goto drop_unlock;
748 skb_set_owner_r(skb, newsk);
749 } else {
678 /* 750 /*
679 * Didn't find an active connection; verify if there 751 * Can't be skb_set_owner_r, this will be done at the
680 * is a listening socket for this llc addr 752 * llc_conn_state_process function, later on, when we will use
753 * skb_queue_rcv_skb to send it to upper layers, this is
754 * another trick required to cope with how the PROCOM state
755 * machine works. -acme
681 */ 756 */
682 struct llc_sock *llc;
683 struct sock *parent = llc_lookup_listener(sap, &daddr);
684
685 if (!parent) {
686 dprintk("llc_lookup_listener failed!\n");
687 goto drop;
688 }
689
690 sk = llc_sk_alloc(parent->sk_family, GFP_ATOMIC, parent->sk_prot);
691 if (!sk) {
692 sock_put(parent);
693 goto drop;
694 }
695 llc = llc_sk(sk);
696 memcpy(&llc->laddr, &daddr, sizeof(llc->laddr));
697 memcpy(&llc->daddr, &saddr, sizeof(llc->daddr));
698 llc_sap_add_socket(sap, sk);
699 sock_hold(sk);
700 sock_put(parent);
701 skb->sk = parent;
702 } else
703 skb->sk = sk; 757 skb->sk = sk;
704 bh_lock_sock(sk); 758 }
705 if (!sock_owned_by_user(sk)) 759 if (!sock_owned_by_user(sk))
706 llc_conn_rcv(sk, skb); 760 llc_conn_rcv(sk, skb);
707 else { 761 else {
@@ -709,11 +763,16 @@ void llc_conn_handler(struct llc_sap *sap, struct sk_buff *skb)
709 llc_set_backlog_type(skb, LLC_PACKET); 763 llc_set_backlog_type(skb, LLC_PACKET);
710 sk_add_backlog(sk, skb); 764 sk_add_backlog(sk, skb);
711 } 765 }
766out:
712 bh_unlock_sock(sk); 767 bh_unlock_sock(sk);
713 sock_put(sk); 768 sock_put(sk);
714 return; 769 return;
715drop: 770drop:
716 kfree_skb(skb); 771 kfree_skb(skb);
772 return;
773drop_unlock:
774 kfree_skb(skb);
775 goto out;
717} 776}
718 777
719#undef LLC_REFCNT_DEBUG 778#undef LLC_REFCNT_DEBUG
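
Editor's note: the reworked llc_conn_handler() above first tries the established-connection list via __llc_lookup() and only falls back to a listening socket, creating the child sock before the frame reaches the state machine. A compact user-space model of that lookup-with-fallback dispatch follows; every type and table in it is illustrative.

#include <stddef.h>
#include <stdio.h>

struct sock_model { int listening; };

static struct sock_model established_sk = { 0 };
static struct sock_model listener_sk    = { 1 };

/* Stand-ins for the two list lookups. */
static struct sock_model *lookup_established(int key)
{
        return key == 1 ? &established_sk : NULL;
}

static struct sock_model *lookup_listener(int key)
{
        return key == 2 ? &listener_sk : NULL;
}

/* Established connection first, listener as the fallback. */
static struct sock_model *lookup(int key)
{
        struct sock_model *sk = lookup_established(key);

        return sk ? sk : lookup_listener(key);
}

int main(void)
{
        struct sock_model *sk = lookup(2);

        if (!sk)
                puts("no match: drop the frame");
        else if (sk->listening)
                puts("listener: create a child sock, then run the state machine");
        else
                puts("established: run the state machine on the existing sock");
        return 0;
}
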
@@ -722,32 +781,6 @@ static atomic_t llc_sock_nr;
722#endif 781#endif
723 782
724/** 783/**
725 * llc_release_sockets - releases all sockets in a sap
726 * @sap: sap to release its sockets
727 *
728 * Releases all connections of a sap. Returns 0 if all actions complete
729 * successfully, nonzero otherwise
730 */
731int llc_release_sockets(struct llc_sap *sap)
732{
733 int rc = 0;
734 struct sock *sk;
735 struct hlist_node *node;
736
737 write_lock_bh(&sap->sk_list.lock);
738
739 sk_for_each(sk, node, &sap->sk_list.list) {
740 llc_sk(sk)->state = LLC_CONN_STATE_TEMP;
741
742 if (llc_send_disc(sk))
743 rc = 1;
744 }
745
746 write_unlock_bh(&sap->sk_list.lock);
747 return rc;
748}
749
750/**
751 * llc_backlog_rcv - Processes rx frames and expired timers. 784 * llc_backlog_rcv - Processes rx frames and expired timers.
752 * @sk: LLC sock (p8022 connection) 785 * @sk: LLC sock (p8022 connection)
753 * @skb: queued rx frame or event 786 * @skb: queued rx frame or event
@@ -762,14 +795,14 @@ static int llc_backlog_rcv(struct sock *sk, struct sk_buff *skb)
762 int rc = 0; 795 int rc = 0;
763 struct llc_sock *llc = llc_sk(sk); 796 struct llc_sock *llc = llc_sk(sk);
764 797
765 if (llc_backlog_type(skb) == LLC_PACKET) { 798 if (likely(llc_backlog_type(skb) == LLC_PACKET)) {
766 if (llc->state > 1) /* not closed */ 799 if (likely(llc->state > 1)) /* not closed */
767 rc = llc_conn_rcv(sk, skb); 800 rc = llc_conn_rcv(sk, skb);
768 else 801 else
769 goto out_kfree_skb; 802 goto out_kfree_skb;
770 } else if (llc_backlog_type(skb) == LLC_EVENT) { 803 } else if (llc_backlog_type(skb) == LLC_EVENT) {
771 /* timer expiration event */ 804 /* timer expiration event */
772 if (llc->state > 1) /* not closed */ 805 if (likely(llc->state > 1)) /* not closed */
773 rc = llc_conn_state_process(sk, skb); 806 rc = llc_conn_state_process(sk, skb);
774 else 807 else
775 goto out_kfree_skb; 808 goto out_kfree_skb;
@@ -799,22 +832,22 @@ static void llc_sk_init(struct sock* sk)
799 llc->dec_step = llc->connect_step = 1; 832 llc->dec_step = llc->connect_step = 1;
800 833
801 init_timer(&llc->ack_timer.timer); 834 init_timer(&llc->ack_timer.timer);
802 llc->ack_timer.expire = LLC_ACK_TIME; 835 llc->ack_timer.expire = sysctl_llc2_ack_timeout;
803 llc->ack_timer.timer.data = (unsigned long)sk; 836 llc->ack_timer.timer.data = (unsigned long)sk;
804 llc->ack_timer.timer.function = llc_conn_ack_tmr_cb; 837 llc->ack_timer.timer.function = llc_conn_ack_tmr_cb;
805 838
806 init_timer(&llc->pf_cycle_timer.timer); 839 init_timer(&llc->pf_cycle_timer.timer);
807 llc->pf_cycle_timer.expire = LLC_P_TIME; 840 llc->pf_cycle_timer.expire = sysctl_llc2_p_timeout;
808 llc->pf_cycle_timer.timer.data = (unsigned long)sk; 841 llc->pf_cycle_timer.timer.data = (unsigned long)sk;
809 llc->pf_cycle_timer.timer.function = llc_conn_pf_cycle_tmr_cb; 842 llc->pf_cycle_timer.timer.function = llc_conn_pf_cycle_tmr_cb;
810 843
811 init_timer(&llc->rej_sent_timer.timer); 844 init_timer(&llc->rej_sent_timer.timer);
812 llc->rej_sent_timer.expire = LLC_REJ_TIME; 845 llc->rej_sent_timer.expire = sysctl_llc2_rej_timeout;
813 llc->rej_sent_timer.timer.data = (unsigned long)sk; 846 llc->rej_sent_timer.timer.data = (unsigned long)sk;
814 llc->rej_sent_timer.timer.function = llc_conn_rej_tmr_cb; 847 llc->rej_sent_timer.timer.function = llc_conn_rej_tmr_cb;
815 848
816 init_timer(&llc->busy_state_timer.timer); 849 init_timer(&llc->busy_state_timer.timer);
817 llc->busy_state_timer.expire = LLC_BUSY_TIME; 850 llc->busy_state_timer.expire = sysctl_llc2_busy_timeout;
818 llc->busy_state_timer.timer.data = (unsigned long)sk; 851 llc->busy_state_timer.timer.data = (unsigned long)sk;
819 llc->busy_state_timer.timer.function = llc_conn_busy_tmr_cb; 852 llc->busy_state_timer.timer.function = llc_conn_busy_tmr_cb;
820 853
@@ -834,7 +867,8 @@ static void llc_sk_init(struct sock* sk)
834 * Allocates a LLC sock and initializes it. Returns the new LLC sock 867 * Allocates a LLC sock and initializes it. Returns the new LLC sock
835 * or %NULL if there's no memory available for one 868 * or %NULL if there's no memory available for one
836 */ 869 */
837struct sock *llc_sk_alloc(int family, int priority, struct proto *prot) 870struct sock *llc_sk_alloc(int family, unsigned int __nocast priority,
871 struct proto *prot)
838{ 872{
839 struct sock *sk = sk_alloc(family, priority, prot, 1); 873 struct sock *sk = sk_alloc(family, priority, prot, 1);
840 874
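
Editor's note: within llc_conn.c the per-socket timers are now seeded from the sysctl_llc2_*_timeout variables, already expressed in jiffies, instead of the compile-time LLC_*_TIME constants, which makes the values run-time tunable. A small model of that initialization pattern; the defaults below are made up.

#include <stdio.h>

/* Run-time tunables, pre-scaled to jiffies (illustrative defaults). */
static unsigned long tunable_ack_timeout  = 3 * 1000;
static unsigned long tunable_busy_timeout = 6 * 1000;

struct conn_timers_model {
        unsigned long ack_expire;
        unsigned long busy_expire;
};

/* Each new connection copies the tunables current at creation time. */
static void conn_timers_init(struct conn_timers_model *t)
{
        t->ack_expire  = tunable_ack_timeout;
        t->busy_expire = tunable_busy_timeout;
}

int main(void)
{
        struct conn_timers_model t;

        tunable_ack_timeout = 5 * 1000;    /* an admin retunes the value */
        conn_timers_init(&t);
        printf("ack=%lu busy=%lu\n", t.ack_expire, t.busy_expire);
        return 0;
}
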
diff --git a/net/llc/llc_core.c b/net/llc/llc_core.c
index 9727455bf0e7..ab0fcd32fd84 100644
--- a/net/llc/llc_core.c
+++ b/net/llc/llc_core.c
@@ -40,6 +40,7 @@ static struct llc_sap *llc_sap_alloc(void)
40 sap->state = LLC_SAP_STATE_ACTIVE; 40 sap->state = LLC_SAP_STATE_ACTIVE;
41 memcpy(sap->laddr.mac, llc_station_mac_sa, ETH_ALEN); 41 memcpy(sap->laddr.mac, llc_station_mac_sa, ETH_ALEN);
42 rwlock_init(&sap->sk_list.lock); 42 rwlock_init(&sap->sk_list.lock);
43 atomic_set(&sap->refcnt, 1);
43 } 44 }
44 return sap; 45 return sap;
45} 46}
@@ -52,9 +53,7 @@ static struct llc_sap *llc_sap_alloc(void)
52 */ 53 */
53static void llc_add_sap(struct llc_sap *sap) 54static void llc_add_sap(struct llc_sap *sap)
54{ 55{
55 write_lock_bh(&llc_sap_list_lock);
56 list_add_tail(&sap->node, &llc_sap_list); 56 list_add_tail(&sap->node, &llc_sap_list);
57 write_unlock_bh(&llc_sap_list_lock);
58} 57}
59 58
60/** 59/**
@@ -70,11 +69,25 @@ static void llc_del_sap(struct llc_sap *sap)
70 write_unlock_bh(&llc_sap_list_lock); 69 write_unlock_bh(&llc_sap_list_lock);
71} 70}
72 71
72static struct llc_sap *__llc_sap_find(unsigned char sap_value)
73{
74 struct llc_sap* sap;
75
76 list_for_each_entry(sap, &llc_sap_list, node)
77 if (sap->laddr.lsap == sap_value)
78 goto out;
79 sap = NULL;
80out:
81 return sap;
82}
83
73/** 84/**
74 * llc_sap_find - searchs a SAP in station 85 * llc_sap_find - searchs a SAP in station
75 * @sap_value: sap to be found 86 * @sap_value: sap to be found
76 * 87 *
77 * Searchs for a sap in the sap list of the LLC's station upon the sap ID. 88 * Searchs for a sap in the sap list of the LLC's station upon the sap ID.
89 * If the sap is found it will be refcounted and the user will have to do
90 * a llc_sap_put after use.
78 * Returns the sap or %NULL if not found. 91 * Returns the sap or %NULL if not found.
79 */ 92 */
80struct llc_sap *llc_sap_find(unsigned char sap_value) 93struct llc_sap *llc_sap_find(unsigned char sap_value)
@@ -82,11 +95,9 @@ struct llc_sap *llc_sap_find(unsigned char sap_value)
82 struct llc_sap* sap; 95 struct llc_sap* sap;
83 96
84 read_lock_bh(&llc_sap_list_lock); 97 read_lock_bh(&llc_sap_list_lock);
85 list_for_each_entry(sap, &llc_sap_list, node) 98 sap = __llc_sap_find(sap_value);
86 if (sap->laddr.lsap == sap_value) 99 if (sap)
87 goto out; 100 llc_sap_hold(sap);
88 sap = NULL;
89out:
90 read_unlock_bh(&llc_sap_list_lock); 101 read_unlock_bh(&llc_sap_list_lock);
91 return sap; 102 return sap;
92} 103}
@@ -106,19 +117,20 @@ struct llc_sap *llc_sap_open(unsigned char lsap,
106 struct packet_type *pt, 117 struct packet_type *pt,
107 struct net_device *orig_dev)) 118 struct net_device *orig_dev))
108{ 119{
109 struct llc_sap *sap = llc_sap_find(lsap); 120 struct llc_sap *sap = NULL;
110 121
111 if (sap) { /* SAP already exists */ 122 write_lock_bh(&llc_sap_list_lock);
112 sap = NULL; 123 if (__llc_sap_find(lsap)) /* SAP already exists */
113 goto out; 124 goto out;
114 }
115 sap = llc_sap_alloc(); 125 sap = llc_sap_alloc();
116 if (!sap) 126 if (!sap)
117 goto out; 127 goto out;
118 sap->laddr.lsap = lsap; 128 sap->laddr.lsap = lsap;
119 sap->rcv_func = func; 129 sap->rcv_func = func;
130 llc_sap_hold(sap);
120 llc_add_sap(sap); 131 llc_add_sap(sap);
121out: 132out:
133 write_unlock_bh(&llc_sap_list_lock);
122 return sap; 134 return sap;
123} 135}
124 136
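
The llc_core.c hunks above make SAP lookup reference counted: llc_sap_alloc() starts the refcount at 1, llc_sap_find() takes an extra reference with llc_sap_hold() before dropping the list lock, and llc_sap_open() now does the duplicate check and insertion under a single write_lock_bh(). A minimal caller sketch of the new contract (hypothetical function name; llc_sap_find()/llc_sap_put() are the helpers the patch itself uses):

	#include <net/llc.h>

	static void example_use_sap(unsigned char lsap)
	{
		struct llc_sap *sap = llc_sap_find(lsap);	/* holds a reference on success */

		if (!sap)
			return;
		/* ... inspect or use the SAP ... */
		llc_sap_put(sap);	/* every successful find must be balanced by a put */
	}
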
diff --git a/net/llc/llc_if.c b/net/llc/llc_if.c
index 0f84f66018e4..ba90f7f0801a 100644
--- a/net/llc/llc_if.c
+++ b/net/llc/llc_if.c
@@ -47,14 +47,11 @@ int llc_build_and_send_pkt(struct sock *sk, struct sk_buff *skb)
47 int rc = -ECONNABORTED; 47 int rc = -ECONNABORTED;
48 struct llc_sock *llc = llc_sk(sk); 48 struct llc_sock *llc = llc_sk(sk);
49 49
50 if (llc->state == LLC_CONN_STATE_ADM) 50 if (unlikely(llc->state == LLC_CONN_STATE_ADM))
51 goto out; 51 goto out;
52 rc = -EBUSY; 52 rc = -EBUSY;
53 if (llc_data_accept_state(llc->state)) { /* data_conn_refuse */ 53 if (unlikely(llc_data_accept_state(llc->state) || /* data_conn_refuse */
54 llc->failed_data_req = 1; 54 llc->p_flag)) {
55 goto out;
56 }
57 if (llc->p_flag) {
58 llc->failed_data_req = 1; 55 llc->failed_data_req = 1;
59 goto out; 56 goto out;
60 } 57 }
@@ -110,6 +107,7 @@ int llc_establish_connection(struct sock *sk, u8 *lmac, u8 *dmac, u8 dsap)
110 ev->type = LLC_CONN_EV_TYPE_PRIM; 107 ev->type = LLC_CONN_EV_TYPE_PRIM;
111 ev->prim = LLC_CONN_PRIM; 108 ev->prim = LLC_CONN_PRIM;
112 ev->prim_type = LLC_PRIM_TYPE_REQ; 109 ev->prim_type = LLC_PRIM_TYPE_REQ;
110 skb_set_owner_w(skb, sk);
113 rc = llc_conn_state_process(sk, skb); 111 rc = llc_conn_state_process(sk, skb);
114 } 112 }
115out_put: 113out_put:
@@ -144,6 +142,7 @@ int llc_send_disc(struct sock *sk)
144 skb = alloc_skb(0, GFP_ATOMIC); 142 skb = alloc_skb(0, GFP_ATOMIC);
145 if (!skb) 143 if (!skb)
146 goto out; 144 goto out;
145 skb_set_owner_w(skb, sk);
147 sk->sk_state = TCP_CLOSING; 146 sk->sk_state = TCP_CLOSING;
148 ev = llc_conn_ev(skb); 147 ev = llc_conn_ev(skb);
149 ev->type = LLC_CONN_EV_TYPE_PRIM; 148 ev->type = LLC_CONN_EV_TYPE_PRIM;
diff --git a/net/llc/llc_input.c b/net/llc/llc_input.c
index 13b46240b7a1..8f3addf0724c 100644
--- a/net/llc/llc_input.c
+++ b/net/llc/llc_input.c
@@ -99,15 +99,19 @@ out:
99static inline int llc_fixup_skb(struct sk_buff *skb) 99static inline int llc_fixup_skb(struct sk_buff *skb)
100{ 100{
101 u8 llc_len = 2; 101 u8 llc_len = 2;
102 struct llc_pdu_sn *pdu; 102 struct llc_pdu_un *pdu;
103 103
104 if (!pskb_may_pull(skb, sizeof(*pdu))) 104 if (unlikely(!pskb_may_pull(skb, sizeof(*pdu))))
105 return 0; 105 return 0;
106 106
107 pdu = (struct llc_pdu_sn *)skb->data; 107 pdu = (struct llc_pdu_un *)skb->data;
108 if ((pdu->ctrl_1 & LLC_PDU_TYPE_MASK) == LLC_PDU_TYPE_U) 108 if ((pdu->ctrl_1 & LLC_PDU_TYPE_MASK) == LLC_PDU_TYPE_U)
109 llc_len = 1; 109 llc_len = 1;
110 llc_len += 2; 110 llc_len += 2;
111
112 if (unlikely(!pskb_may_pull(skb, llc_len)))
113 return 0;
114
111 skb->h.raw += llc_len; 115 skb->h.raw += llc_len;
112 skb_pull(skb, llc_len); 116 skb_pull(skb, llc_len);
113 if (skb->protocol == htons(ETH_P_802_2)) { 117 if (skb->protocol == htons(ETH_P_802_2)) {
@@ -166,17 +170,22 @@ int llc_rcv(struct sk_buff *skb, struct net_device *dev,
166 */ 170 */
167 if (sap->rcv_func) { 171 if (sap->rcv_func) {
168 sap->rcv_func(skb, dev, pt, orig_dev); 172 sap->rcv_func(skb, dev, pt, orig_dev);
169 goto out; 173 goto out_put;
170 } 174 }
171 dest = llc_pdu_type(skb); 175 dest = llc_pdu_type(skb);
172 if (unlikely(!dest || !llc_type_handlers[dest - 1])) 176 if (unlikely(!dest || !llc_type_handlers[dest - 1]))
173 goto drop; 177 goto drop_put;
174 llc_type_handlers[dest - 1](sap, skb); 178 llc_type_handlers[dest - 1](sap, skb);
179out_put:
180 llc_sap_put(sap);
175out: 181out:
176 return 0; 182 return 0;
177drop: 183drop:
178 kfree_skb(skb); 184 kfree_skb(skb);
179 goto out; 185 goto out;
186drop_put:
187 kfree_skb(skb);
188 goto out_put;
180handle_station: 189handle_station:
181 if (!llc_station_handler) 190 if (!llc_station_handler)
182 goto drop; 191 goto drop;
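
The llc_fixup_skb() change above first makes sure the smallest possible LLC header is linear, derives the real header length from the control field, and then re-checks with pskb_may_pull() before moving skb->h.raw and pulling. The underlying pattern, as a minimal sketch (hypothetical helper; pskb_may_pull() and skb_pull() are the same primitives used in the hunk):

	#include <linux/skbuff.h>

	/* Returns 1 if hdrlen bytes were linearized and consumed, 0 on a
	 * truncated frame. */
	static int example_pull_header(struct sk_buff *skb, unsigned int hdrlen)
	{
		if (unlikely(!pskb_may_pull(skb, hdrlen)))
			return 0;		/* header runs past the data we hold */
		skb_pull(skb, hdrlen);		/* safe: hdrlen bytes are now linear */
		return 1;
	}
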
diff --git a/net/llc/llc_output.c b/net/llc/llc_output.c
index ab5784cf163e..b4d55b6abb67 100644
--- a/net/llc/llc_output.c
+++ b/net/llc/llc_output.c
@@ -98,7 +98,7 @@ int llc_build_and_send_ui_pkt(struct llc_sap *sap, struct sk_buff *skb,
98 dsap, LLC_PDU_CMD); 98 dsap, LLC_PDU_CMD);
99 llc_pdu_init_as_ui_cmd(skb); 99 llc_pdu_init_as_ui_cmd(skb);
100 rc = llc_mac_hdr_init(skb, skb->dev->dev_addr, dmac); 100 rc = llc_mac_hdr_init(skb, skb->dev->dev_addr, dmac);
101 if (!rc) 101 if (likely(!rc))
102 rc = dev_queue_xmit(skb); 102 rc = dev_queue_xmit(skb);
103 return rc; 103 return rc;
104} 104}
diff --git a/net/llc/llc_proc.c b/net/llc/llc_proc.c
index 36e8db3fa1a2..bd531cb235a7 100644
--- a/net/llc/llc_proc.c
+++ b/net/llc/llc_proc.c
@@ -134,7 +134,7 @@ static int llc_seq_socket_show(struct seq_file *seq, void *v)
134 llc_ui_format_mac(seq, llc->daddr.mac); 134 llc_ui_format_mac(seq, llc->daddr.mac);
135 seq_printf(seq, "@%02X %8d %8d %2d %3d %4d\n", llc->daddr.lsap, 135 seq_printf(seq, "@%02X %8d %8d %2d %3d %4d\n", llc->daddr.lsap,
136 atomic_read(&sk->sk_wmem_alloc), 136 atomic_read(&sk->sk_wmem_alloc),
137 atomic_read(&sk->sk_rmem_alloc), 137 atomic_read(&sk->sk_rmem_alloc) - llc->copied_seq,
138 sk->sk_state, 138 sk->sk_state,
139 sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : -1, 139 sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : -1,
140 llc->link); 140 llc->link);
diff --git a/net/llc/llc_s_ac.c b/net/llc/llc_s_ac.c
index ed8ba7de6122..bb3580fb8cfe 100644
--- a/net/llc/llc_s_ac.c
+++ b/net/llc/llc_s_ac.c
@@ -58,7 +58,7 @@ int llc_sap_action_send_ui(struct llc_sap *sap, struct sk_buff *skb)
58 ev->daddr.lsap, LLC_PDU_CMD); 58 ev->daddr.lsap, LLC_PDU_CMD);
59 llc_pdu_init_as_ui_cmd(skb); 59 llc_pdu_init_as_ui_cmd(skb);
60 rc = llc_mac_hdr_init(skb, ev->saddr.mac, ev->daddr.mac); 60 rc = llc_mac_hdr_init(skb, ev->saddr.mac, ev->daddr.mac);
61 if (!rc) 61 if (likely(!rc))
62 rc = dev_queue_xmit(skb); 62 rc = dev_queue_xmit(skb);
63 return rc; 63 return rc;
64} 64}
@@ -81,7 +81,7 @@ int llc_sap_action_send_xid_c(struct llc_sap *sap, struct sk_buff *skb)
81 ev->daddr.lsap, LLC_PDU_CMD); 81 ev->daddr.lsap, LLC_PDU_CMD);
82 llc_pdu_init_as_xid_cmd(skb, LLC_XID_NULL_CLASS_2, 0); 82 llc_pdu_init_as_xid_cmd(skb, LLC_XID_NULL_CLASS_2, 0);
83 rc = llc_mac_hdr_init(skb, ev->saddr.mac, ev->daddr.mac); 83 rc = llc_mac_hdr_init(skb, ev->saddr.mac, ev->daddr.mac);
84 if (!rc) 84 if (likely(!rc))
85 rc = dev_queue_xmit(skb); 85 rc = dev_queue_xmit(skb);
86 return rc; 86 return rc;
87} 87}
@@ -103,15 +103,14 @@ int llc_sap_action_send_xid_r(struct llc_sap *sap, struct sk_buff *skb)
103 llc_pdu_decode_sa(skb, mac_da); 103 llc_pdu_decode_sa(skb, mac_da);
104 llc_pdu_decode_da(skb, mac_sa); 104 llc_pdu_decode_da(skb, mac_sa);
105 llc_pdu_decode_ssap(skb, &dsap); 105 llc_pdu_decode_ssap(skb, &dsap);
106 nskb = llc_alloc_frame(); 106 nskb = llc_alloc_frame(NULL, skb->dev);
107 if (!nskb) 107 if (!nskb)
108 goto out; 108 goto out;
109 nskb->dev = skb->dev;
110 llc_pdu_header_init(nskb, LLC_PDU_TYPE_U, sap->laddr.lsap, dsap, 109 llc_pdu_header_init(nskb, LLC_PDU_TYPE_U, sap->laddr.lsap, dsap,
111 LLC_PDU_RSP); 110 LLC_PDU_RSP);
112 llc_pdu_init_as_xid_rsp(nskb, LLC_XID_NULL_CLASS_2, 0); 111 llc_pdu_init_as_xid_rsp(nskb, LLC_XID_NULL_CLASS_2, 0);
113 rc = llc_mac_hdr_init(nskb, mac_sa, mac_da); 112 rc = llc_mac_hdr_init(nskb, mac_sa, mac_da);
114 if (!rc) 113 if (likely(!rc))
115 rc = dev_queue_xmit(nskb); 114 rc = dev_queue_xmit(nskb);
116out: 115out:
117 return rc; 116 return rc;
@@ -135,7 +134,7 @@ int llc_sap_action_send_test_c(struct llc_sap *sap, struct sk_buff *skb)
135 ev->daddr.lsap, LLC_PDU_CMD); 134 ev->daddr.lsap, LLC_PDU_CMD);
136 llc_pdu_init_as_test_cmd(skb); 135 llc_pdu_init_as_test_cmd(skb);
137 rc = llc_mac_hdr_init(skb, ev->saddr.mac, ev->daddr.mac); 136 rc = llc_mac_hdr_init(skb, ev->saddr.mac, ev->daddr.mac);
138 if (!rc) 137 if (likely(!rc))
139 rc = dev_queue_xmit(skb); 138 rc = dev_queue_xmit(skb);
140 return rc; 139 return rc;
141} 140}
@@ -149,15 +148,14 @@ int llc_sap_action_send_test_r(struct llc_sap *sap, struct sk_buff *skb)
149 llc_pdu_decode_sa(skb, mac_da); 148 llc_pdu_decode_sa(skb, mac_da);
150 llc_pdu_decode_da(skb, mac_sa); 149 llc_pdu_decode_da(skb, mac_sa);
151 llc_pdu_decode_ssap(skb, &dsap); 150 llc_pdu_decode_ssap(skb, &dsap);
152 nskb = llc_alloc_frame(); 151 nskb = llc_alloc_frame(NULL, skb->dev);
153 if (!nskb) 152 if (!nskb)
154 goto out; 153 goto out;
155 nskb->dev = skb->dev;
156 llc_pdu_header_init(nskb, LLC_PDU_TYPE_U, sap->laddr.lsap, dsap, 154 llc_pdu_header_init(nskb, LLC_PDU_TYPE_U, sap->laddr.lsap, dsap,
157 LLC_PDU_RSP); 155 LLC_PDU_RSP);
158 llc_pdu_init_as_test_rsp(nskb, skb); 156 llc_pdu_init_as_test_rsp(nskb, skb);
159 rc = llc_mac_hdr_init(nskb, mac_sa, mac_da); 157 rc = llc_mac_hdr_init(nskb, mac_sa, mac_da);
160 if (!rc) 158 if (likely(!rc))
161 rc = dev_queue_xmit(nskb); 159 rc = dev_queue_xmit(nskb);
162out: 160out:
163 return rc; 161 return rc;
diff --git a/net/llc/llc_sap.c b/net/llc/llc_sap.c
index 34228ef14985..4029ceee9b91 100644
--- a/net/llc/llc_sap.c
+++ b/net/llc/llc_sap.c
@@ -26,11 +26,12 @@
26 26
27/** 27/**
28 * llc_alloc_frame - allocates sk_buff for frame 28 * llc_alloc_frame - allocates sk_buff for frame
29 * @dev: network device this skb will be sent over
29 * 30 *
30 * Allocates an sk_buff for frame and initializes sk_buff fields. 31 * Allocates an sk_buff for frame and initializes sk_buff fields.
31 * Returns allocated skb or %NULL when out of memory. 32 * Returns allocated skb or %NULL when out of memory.
32 */ 33 */
33struct sk_buff *llc_alloc_frame(void) 34struct sk_buff *llc_alloc_frame(struct sock *sk, struct net_device *dev)
34{ 35{
35 struct sk_buff *skb = alloc_skb(128, GFP_ATOMIC); 36 struct sk_buff *skb = alloc_skb(128, GFP_ATOMIC);
36 37
@@ -38,18 +39,23 @@ struct sk_buff *llc_alloc_frame(void)
38 skb_reserve(skb, 50); 39 skb_reserve(skb, 50);
39 skb->nh.raw = skb->h.raw = skb->data; 40 skb->nh.raw = skb->h.raw = skb->data;
40 skb->protocol = htons(ETH_P_802_2); 41 skb->protocol = htons(ETH_P_802_2);
41 skb->dev = dev_base->next; 42 skb->dev = dev;
42 skb->mac.raw = skb->head; 43 skb->mac.raw = skb->head;
44 if (sk != NULL)
45 skb_set_owner_w(skb, sk);
43 } 46 }
44 return skb; 47 return skb;
45} 48}
46 49
47void llc_save_primitive(struct sk_buff* skb, u8 prim) 50void llc_save_primitive(struct sock *sk, struct sk_buff* skb, u8 prim)
48{ 51{
49 struct sockaddr_llc *addr = llc_ui_skb_cb(skb); 52 struct sockaddr_llc *addr;
50 53
54 if (skb->sk->sk_type == SOCK_STREAM) /* See UNIX98 */
55 return;
51 /* save primitive for use by the user. */ 56 /* save primitive for use by the user. */
52 addr->sllc_family = skb->sk->sk_family; 57 addr = llc_ui_skb_cb(skb);
58 addr->sllc_family = sk->sk_family;
53 addr->sllc_arphrd = skb->dev->type; 59 addr->sllc_arphrd = skb->dev->type;
54 addr->sllc_test = prim == LLC_TEST_PRIM; 60 addr->sllc_test = prim == LLC_TEST_PRIM;
55 addr->sllc_xid = prim == LLC_XID_PRIM; 61 addr->sllc_xid = prim == LLC_XID_PRIM;
@@ -189,7 +195,7 @@ static void llc_sap_state_process(struct llc_sap *sap, struct sk_buff *skb)
189 if (skb->sk->sk_state == TCP_LISTEN) 195 if (skb->sk->sk_state == TCP_LISTEN)
190 kfree_skb(skb); 196 kfree_skb(skb);
191 else { 197 else {
192 llc_save_primitive(skb, ev->prim); 198 llc_save_primitive(skb->sk, skb, ev->prim);
193 199
194 /* queue skb to the user. */ 200 /* queue skb to the user. */
195 if (sock_queue_rcv_skb(skb->sk, skb)) 201 if (sock_queue_rcv_skb(skb->sk, skb))
@@ -308,7 +314,7 @@ void llc_sap_handler(struct llc_sap *sap, struct sk_buff *skb)
308 314
309 sk = llc_lookup_dgram(sap, &laddr); 315 sk = llc_lookup_dgram(sap, &laddr);
310 if (sk) { 316 if (sk) {
311 skb->sk = sk; 317 skb_set_owner_r(skb, sk);
312 llc_sap_rcv(sap, skb); 318 llc_sap_rcv(sap, skb);
313 sock_put(sk); 319 sock_put(sk);
314 } else 320 } else
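
llc_alloc_frame() now receives the owning socket and the outgoing device, so callers no longer patch nskb->dev afterwards, and the frame is charged to the socket's send buffer via skb_set_owner_w() when a socket is supplied; on the datagram receive side skb_set_owner_r() replaces the bare skb->sk assignment. A sketch of the new call-site shape (hypothetical function; NULL means no owning socket, as in the SAP and station actions above):

	static struct sk_buff *example_alloc_reply(struct sk_buff *in)
	{
		/* nskb->dev is set to in->dev at allocation time; pass a
		 * struct sock * instead of NULL to charge the skb to that
		 * socket's write allocation. */
		return llc_alloc_frame(NULL, in->dev);
	}
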
diff --git a/net/llc/llc_station.c b/net/llc/llc_station.c
index 8fe48a24bad5..f37dbf8ef126 100644
--- a/net/llc/llc_station.c
+++ b/net/llc/llc_station.c
@@ -50,6 +50,10 @@ struct llc_station {
50 struct sk_buff_head mac_pdu_q; 50 struct sk_buff_head mac_pdu_q;
51}; 51};
52 52
53#define LLC_STATION_ACK_TIME (3 * HZ)
54
55int sysctl_llc_station_ack_timeout = LLC_STATION_ACK_TIME;
56
53/* Types of events (possible values in 'ev->type') */ 57/* Types of events (possible values in 'ev->type') */
54#define LLC_STATION_EV_TYPE_SIMPLE 1 58#define LLC_STATION_EV_TYPE_SIMPLE 1
55#define LLC_STATION_EV_TYPE_CONDITION 2 59#define LLC_STATION_EV_TYPE_CONDITION 2
@@ -218,7 +222,8 @@ static void llc_station_send_pdu(struct sk_buff *skb)
218 222
219static int llc_station_ac_start_ack_timer(struct sk_buff *skb) 223static int llc_station_ac_start_ack_timer(struct sk_buff *skb)
220{ 224{
221 mod_timer(&llc_main_station.ack_timer, jiffies + LLC_ACK_TIME * HZ); 225 mod_timer(&llc_main_station.ack_timer,
226 jiffies + sysctl_llc_station_ack_timeout);
222 return 0; 227 return 0;
223} 228}
224 229
@@ -249,14 +254,14 @@ static int llc_station_ac_inc_xid_r_cnt_by_1(struct sk_buff *skb)
249static int llc_station_ac_send_null_dsap_xid_c(struct sk_buff *skb) 254static int llc_station_ac_send_null_dsap_xid_c(struct sk_buff *skb)
250{ 255{
251 int rc = 1; 256 int rc = 1;
252 struct sk_buff *nskb = llc_alloc_frame(); 257 struct sk_buff *nskb = llc_alloc_frame(NULL, skb->dev);
253 258
254 if (!nskb) 259 if (!nskb)
255 goto out; 260 goto out;
256 llc_pdu_header_init(nskb, LLC_PDU_TYPE_U, 0, 0, LLC_PDU_CMD); 261 llc_pdu_header_init(nskb, LLC_PDU_TYPE_U, 0, 0, LLC_PDU_CMD);
257 llc_pdu_init_as_xid_cmd(nskb, LLC_XID_NULL_CLASS_2, 127); 262 llc_pdu_init_as_xid_cmd(nskb, LLC_XID_NULL_CLASS_2, 127);
258 rc = llc_mac_hdr_init(nskb, llc_station_mac_sa, llc_station_mac_sa); 263 rc = llc_mac_hdr_init(nskb, llc_station_mac_sa, llc_station_mac_sa);
259 if (rc) 264 if (unlikely(rc))
260 goto free; 265 goto free;
261 llc_station_send_pdu(nskb); 266 llc_station_send_pdu(nskb);
262out: 267out:
@@ -270,18 +275,17 @@ static int llc_station_ac_send_xid_r(struct sk_buff *skb)
270{ 275{
271 u8 mac_da[ETH_ALEN], dsap; 276 u8 mac_da[ETH_ALEN], dsap;
272 int rc = 1; 277 int rc = 1;
273 struct sk_buff* nskb = llc_alloc_frame(); 278 struct sk_buff* nskb = llc_alloc_frame(NULL, skb->dev);
274 279
275 if (!nskb) 280 if (!nskb)
276 goto out; 281 goto out;
277 rc = 0; 282 rc = 0;
278 nskb->dev = skb->dev;
279 llc_pdu_decode_sa(skb, mac_da); 283 llc_pdu_decode_sa(skb, mac_da);
280 llc_pdu_decode_ssap(skb, &dsap); 284 llc_pdu_decode_ssap(skb, &dsap);
281 llc_pdu_header_init(nskb, LLC_PDU_TYPE_U, 0, dsap, LLC_PDU_RSP); 285 llc_pdu_header_init(nskb, LLC_PDU_TYPE_U, 0, dsap, LLC_PDU_RSP);
282 llc_pdu_init_as_xid_rsp(nskb, LLC_XID_NULL_CLASS_2, 127); 286 llc_pdu_init_as_xid_rsp(nskb, LLC_XID_NULL_CLASS_2, 127);
283 rc = llc_mac_hdr_init(nskb, llc_station_mac_sa, mac_da); 287 rc = llc_mac_hdr_init(nskb, llc_station_mac_sa, mac_da);
284 if (rc) 288 if (unlikely(rc))
285 goto free; 289 goto free;
286 llc_station_send_pdu(nskb); 290 llc_station_send_pdu(nskb);
287out: 291out:
@@ -295,18 +299,17 @@ static int llc_station_ac_send_test_r(struct sk_buff *skb)
295{ 299{
296 u8 mac_da[ETH_ALEN], dsap; 300 u8 mac_da[ETH_ALEN], dsap;
297 int rc = 1; 301 int rc = 1;
298 struct sk_buff *nskb = llc_alloc_frame(); 302 struct sk_buff *nskb = llc_alloc_frame(NULL, skb->dev);
299 303
300 if (!nskb) 304 if (!nskb)
301 goto out; 305 goto out;
302 rc = 0; 306 rc = 0;
303 nskb->dev = skb->dev;
304 llc_pdu_decode_sa(skb, mac_da); 307 llc_pdu_decode_sa(skb, mac_da);
305 llc_pdu_decode_ssap(skb, &dsap); 308 llc_pdu_decode_ssap(skb, &dsap);
306 llc_pdu_header_init(nskb, LLC_PDU_TYPE_U, 0, dsap, LLC_PDU_RSP); 309 llc_pdu_header_init(nskb, LLC_PDU_TYPE_U, 0, dsap, LLC_PDU_RSP);
307 llc_pdu_init_as_test_rsp(nskb, skb); 310 llc_pdu_init_as_test_rsp(nskb, skb);
308 rc = llc_mac_hdr_init(nskb, llc_station_mac_sa, mac_da); 311 rc = llc_mac_hdr_init(nskb, llc_station_mac_sa, mac_da);
309 if (rc) 312 if (unlikely(rc))
310 goto free; 313 goto free;
311 llc_station_send_pdu(nskb); 314 llc_station_send_pdu(nskb);
312out: 315out:
@@ -689,7 +692,8 @@ int __init llc_station_init(void)
689 init_timer(&llc_main_station.ack_timer); 692 init_timer(&llc_main_station.ack_timer);
690 llc_main_station.ack_timer.data = (unsigned long)&llc_main_station; 693 llc_main_station.ack_timer.data = (unsigned long)&llc_main_station;
691 llc_main_station.ack_timer.function = llc_station_ack_tmr_cb; 694 llc_main_station.ack_timer.function = llc_station_ack_tmr_cb;
692 695 llc_main_station.ack_timer.expires = jiffies +
696 sysctl_llc_station_ack_timeout;
693 skb = alloc_skb(0, GFP_ATOMIC); 697 skb = alloc_skb(0, GFP_ATOMIC);
694 if (!skb) 698 if (!skb)
695 goto out; 699 goto out;
@@ -697,7 +701,6 @@ int __init llc_station_init(void)
697 llc_set_station_handler(llc_station_rcv); 701 llc_set_station_handler(llc_station_rcv);
698 ev = llc_station_ev(skb); 702 ev = llc_station_ev(skb);
699 memset(ev, 0, sizeof(*ev)); 703 memset(ev, 0, sizeof(*ev));
700 llc_main_station.ack_timer.expires = jiffies + 3 * HZ;
701 llc_main_station.maximum_retry = 1; 704 llc_main_station.maximum_retry = 1;
702 llc_main_station.state = LLC_STATION_STATE_DOWN; 705 llc_main_station.state = LLC_STATION_STATE_DOWN;
703 ev->type = LLC_STATION_EV_TYPE_SIMPLE; 706 ev->type = LLC_STATION_EV_TYPE_SIMPLE;
diff --git a/net/llc/sysctl_net_llc.c b/net/llc/sysctl_net_llc.c
new file mode 100644
index 000000000000..d1eaddb13633
--- /dev/null
+++ b/net/llc/sysctl_net_llc.c
@@ -0,0 +1,131 @@
1/*
2 * sysctl_net_llc.c: sysctl interface to LLC net subsystem.
3 *
4 * Arnaldo Carvalho de Melo <acme@conectiva.com.br>
5 */
6
7#include <linux/config.h>
8#include <linux/mm.h>
9#include <linux/init.h>
10#include <linux/sysctl.h>
11#include <net/llc.h>
12
13#ifndef CONFIG_SYSCTL
14#error This file should not be compiled without CONFIG_SYSCTL defined
15#endif
16
17static struct ctl_table llc2_timeout_table[] = {
18 {
19 .ctl_name = NET_LLC2_ACK_TIMEOUT,
20 .procname = "ack",
21 .data = &sysctl_llc2_ack_timeout,
22 .maxlen = sizeof(long),
23 .mode = 0644,
24 .proc_handler = &proc_dointvec_jiffies,
25 .strategy = &sysctl_jiffies,
26 },
27 {
28 .ctl_name = NET_LLC2_BUSY_TIMEOUT,
29 .procname = "busy",
30 .data = &sysctl_llc2_busy_timeout,
31 .maxlen = sizeof(long),
32 .mode = 0644,
33 .proc_handler = &proc_dointvec_jiffies,
34 .strategy = &sysctl_jiffies,
35 },
36 {
37 .ctl_name = NET_LLC2_P_TIMEOUT,
38 .procname = "p",
39 .data = &sysctl_llc2_p_timeout,
40 .maxlen = sizeof(long),
41 .mode = 0644,
42 .proc_handler = &proc_dointvec_jiffies,
43 .strategy = &sysctl_jiffies,
44 },
45 {
46 .ctl_name = NET_LLC2_REJ_TIMEOUT,
47 .procname = "rej",
48 .data = &sysctl_llc2_rej_timeout,
49 .maxlen = sizeof(long),
50 .mode = 0644,
51 .proc_handler = &proc_dointvec_jiffies,
52 .strategy = &sysctl_jiffies,
53 },
54 { 0 },
55};
56
57static struct ctl_table llc_station_table[] = {
58 {
59 .ctl_name = NET_LLC_STATION_ACK_TIMEOUT,
60 .procname = "ack_timeout",
61 .data = &sysctl_llc_station_ack_timeout,
62 .maxlen = sizeof(long),
63 .mode = 0644,
64 .proc_handler = &proc_dointvec_jiffies,
65 .strategy = &sysctl_jiffies,
66 },
67 { 0 },
68};
69
70static struct ctl_table llc2_dir_timeout_table[] = {
71 {
72 .ctl_name = NET_LLC2,
73 .procname = "timeout",
74 .mode = 0555,
75 .child = llc2_timeout_table,
76 },
77 { 0 },
78};
79
80static struct ctl_table llc_table[] = {
81 {
82 .ctl_name = NET_LLC2,
83 .procname = "llc2",
84 .mode = 0555,
85 .child = llc2_dir_timeout_table,
86 },
87 {
88 .ctl_name = NET_LLC_STATION,
89 .procname = "station",
90 .mode = 0555,
91 .child = llc_station_table,
92 },
93 { 0 },
94};
95
96static struct ctl_table llc_dir_table[] = {
97 {
98 .ctl_name = NET_LLC,
99 .procname = "llc",
100 .mode = 0555,
101 .child = llc_table,
102 },
103 { 0 },
104};
105
106static struct ctl_table llc_root_table[] = {
107 {
108 .ctl_name = CTL_NET,
109 .procname = "net",
110 .mode = 0555,
111 .child = llc_dir_table,
112 },
113 { 0 },
114};
115
116static struct ctl_table_header *llc_table_header;
117
118int __init llc_sysctl_init(void)
119{
120 llc_table_header = register_sysctl_table(llc_root_table, 1);
121
122 return llc_table_header ? 0 : -ENOMEM;
123}
124
125void llc_sysctl_exit(void)
126{
127 if (llc_table_header) {
128 unregister_sysctl_table(llc_table_header);
129 llc_table_header = NULL;
130 }
131}
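
Assuming the usual path construction from the nested ctl_table entries, the tables above expose the new tunables as:

	/proc/sys/net/llc/llc2/timeout/ack
	/proc/sys/net/llc/llc2/timeout/busy
	/proc/sys/net/llc/llc2/timeout/p
	/proc/sys/net/llc/llc2/timeout/rej
	/proc/sys/net/llc/station/ack_timeout

All five use proc_dointvec_jiffies, so the values are kept in jiffies internally while reading and writing as seconds through procfs.
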
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index ff5601ceedcb..efcd10f996ba 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -494,8 +494,8 @@ __build_packet_message(struct nfulnl_instance *inst,
494 if (skb->tstamp.off_sec) { 494 if (skb->tstamp.off_sec) {
495 struct nfulnl_msg_packet_timestamp ts; 495 struct nfulnl_msg_packet_timestamp ts;
496 496
497 ts.sec = cpu_to_be64(skb_tv_base.tv_sec + skb->tstamp.off_sec); 497 ts.sec = cpu_to_be64(skb->tstamp.off_sec);
498 ts.usec = cpu_to_be64(skb_tv_base.tv_usec + skb->tstamp.off_usec); 498 ts.usec = cpu_to_be64(skb->tstamp.off_usec);
499 499
500 NFA_PUT(inst->skb, NFULA_TIMESTAMP, sizeof(ts), &ts); 500 NFA_PUT(inst->skb, NFULA_TIMESTAMP, sizeof(ts), &ts);
501 } 501 }
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index f81fe8c52e99..eaa44c49567b 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -492,8 +492,8 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue,
492 if (entry->skb->tstamp.off_sec) { 492 if (entry->skb->tstamp.off_sec) {
493 struct nfqnl_msg_packet_timestamp ts; 493 struct nfqnl_msg_packet_timestamp ts;
494 494
495 ts.sec = cpu_to_be64(skb_tv_base.tv_sec + entry->skb->tstamp.off_sec); 495 ts.sec = cpu_to_be64(entry->skb->tstamp.off_sec);
496 ts.usec = cpu_to_be64(skb_tv_base.tv_usec + entry->skb->tstamp.off_usec); 496 ts.usec = cpu_to_be64(entry->skb->tstamp.off_usec);
497 497
498 NFA_PUT(skb, NFQA_TIMESTAMP, sizeof(ts), &ts); 498 NFA_PUT(skb, NFQA_TIMESTAMP, sizeof(ts), &ts);
499 } 499 }
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index ee865d88183b..499ae3df4a44 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -654,8 +654,8 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, struct packe
654 __net_timestamp(skb); 654 __net_timestamp(skb);
655 sock_enable_timestamp(sk); 655 sock_enable_timestamp(sk);
656 } 656 }
657 h->tp_sec = skb_tv_base.tv_sec + skb->tstamp.off_sec; 657 h->tp_sec = skb->tstamp.off_sec;
658 h->tp_usec = skb_tv_base.tv_usec + skb->tstamp.off_usec; 658 h->tp_usec = skb->tstamp.off_usec;
659 659
660 sll = (struct sockaddr_ll*)((u8*)h + TPACKET_ALIGN(sizeof(*h))); 660 sll = (struct sockaddr_ll*)((u8*)h + TPACKET_ALIGN(sizeof(*h)));
661 sll->sll_halen = 0; 661 sll->sll_halen = 0;
@@ -761,12 +761,6 @@ static int packet_sendmsg(struct kiocb *iocb, struct socket *sock,
761 if (dev->hard_header) { 761 if (dev->hard_header) {
762 int res; 762 int res;
763 err = -EINVAL; 763 err = -EINVAL;
764 if (saddr) {
765 if (saddr->sll_halen != dev->addr_len)
766 goto out_free;
767 if (saddr->sll_hatype != dev->type)
768 goto out_free;
769 }
770 res = dev->hard_header(skb, dev, ntohs(proto), addr, NULL, len); 764 res = dev->hard_header(skb, dev, ntohs(proto), addr, NULL, len);
771 if (sock->type != SOCK_DGRAM) { 765 if (sock->type != SOCK_DGRAM) {
772 skb->tail = skb->data; 766 skb->tail = skb->data;
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
index 5acb1680524a..829fdbc4400b 100644
--- a/net/rose/af_rose.c
+++ b/net/rose/af_rose.c
@@ -1472,22 +1472,25 @@ static const char banner[] = KERN_INFO "F6FBB/G4KLX ROSE for Linux. Version 0.62
1472static int __init rose_proto_init(void) 1472static int __init rose_proto_init(void)
1473{ 1473{
1474 int i; 1474 int i;
1475 int rc = proto_register(&rose_proto, 0); 1475 int rc;
1476 1476
1477 if (rose_ndevs > 0x7FFFFFFF/sizeof(struct net_device *)) {
1478 printk(KERN_ERR "ROSE: rose_proto_init - rose_ndevs parameter to large\n");
1479 rc = -EINVAL;
1480 goto out;
1481 }
1482
1483 rc = proto_register(&rose_proto, 0);
1477 if (rc != 0) 1484 if (rc != 0)
1478 goto out; 1485 goto out;
1479 1486
1480 rose_callsign = null_ax25_address; 1487 rose_callsign = null_ax25_address;
1481 1488
1482 if (rose_ndevs > 0x7FFFFFFF/sizeof(struct net_device *)) {
1483 printk(KERN_ERR "ROSE: rose_proto_init - rose_ndevs parameter to large\n");
1484 return -1;
1485 }
1486
1487 dev_rose = kmalloc(rose_ndevs * sizeof(struct net_device *), GFP_KERNEL); 1489 dev_rose = kmalloc(rose_ndevs * sizeof(struct net_device *), GFP_KERNEL);
1488 if (dev_rose == NULL) { 1490 if (dev_rose == NULL) {
1489 printk(KERN_ERR "ROSE: rose_proto_init - unable to allocate device structure\n"); 1491 printk(KERN_ERR "ROSE: rose_proto_init - unable to allocate device structure\n");
1490 return -1; 1492 rc = -ENOMEM;
1493 goto out_proto_unregister;
1491 } 1494 }
1492 1495
1493 memset(dev_rose, 0x00, rose_ndevs * sizeof(struct net_device*)); 1496 memset(dev_rose, 0x00, rose_ndevs * sizeof(struct net_device*));
@@ -1500,10 +1503,12 @@ static int __init rose_proto_init(void)
1500 name, rose_setup); 1503 name, rose_setup);
1501 if (!dev) { 1504 if (!dev) {
1502 printk(KERN_ERR "ROSE: rose_proto_init - unable to allocate memory\n"); 1505 printk(KERN_ERR "ROSE: rose_proto_init - unable to allocate memory\n");
1506 rc = -ENOMEM;
1503 goto fail; 1507 goto fail;
1504 } 1508 }
1505 if (register_netdev(dev)) { 1509 rc = register_netdev(dev);
1506 printk(KERN_ERR "ROSE: netdevice regeistration failed\n"); 1510 if (rc) {
1511 printk(KERN_ERR "ROSE: netdevice registration failed\n");
1507 free_netdev(dev); 1512 free_netdev(dev);
1508 goto fail; 1513 goto fail;
1509 } 1514 }
@@ -1536,8 +1541,9 @@ fail:
1536 free_netdev(dev_rose[i]); 1541 free_netdev(dev_rose[i]);
1537 } 1542 }
1538 kfree(dev_rose); 1543 kfree(dev_rose);
1544out_proto_unregister:
1539 proto_unregister(&rose_proto); 1545 proto_unregister(&rose_proto);
1540 return -ENOMEM; 1546 goto out;
1541} 1547}
1542module_init(rose_proto_init); 1548module_init(rose_proto_init);
1543 1549
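
The rose_proto_init() rework above moves the rose_ndevs sanity check ahead of proto_register() and turns the bare "return -1" paths into proper error codes with reverse-order unwinding. The general shape, as a minimal sketch (only proto_register()/proto_unregister() are real here; the other helpers are placeholders):

	static int __init example_proto_init(void)
	{
		int rc;

		if (bad_module_param())			/* cheap parameter check first */
			return -EINVAL;

		rc = proto_register(&rose_proto, 0);	/* first resource acquired */
		if (rc)
			return rc;

		rc = setup_devices();			/* later step that can fail */
		if (rc)
			goto out_proto_unregister;
		return 0;

	out_proto_unregister:				/* unwind in reverse order */
		proto_unregister(&rose_proto);
		return rc;
	}
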
diff --git a/net/sched/em_meta.c b/net/sched/em_meta.c
index 00eae5f9a01a..cf68a59fdc5a 100644
--- a/net/sched/em_meta.c
+++ b/net/sched/em_meta.c
@@ -393,10 +393,10 @@ META_COLLECTOR(int_sk_route_caps)
393 dst->value = skb->sk->sk_route_caps; 393 dst->value = skb->sk->sk_route_caps;
394} 394}
395 395
396META_COLLECTOR(int_sk_hashent) 396META_COLLECTOR(int_sk_hash)
397{ 397{
398 SKIP_NONLOCAL(skb); 398 SKIP_NONLOCAL(skb);
399 dst->value = skb->sk->sk_hashent; 399 dst->value = skb->sk->sk_hash;
400} 400}
401 401
402META_COLLECTOR(int_sk_lingertime) 402META_COLLECTOR(int_sk_lingertime)
@@ -515,7 +515,7 @@ static struct meta_ops __meta_ops[TCF_META_TYPE_MAX+1][TCF_META_ID_MAX+1] = {
515 [META_ID(SK_FORWARD_ALLOCS)] = META_FUNC(int_sk_fwd_alloc), 515 [META_ID(SK_FORWARD_ALLOCS)] = META_FUNC(int_sk_fwd_alloc),
516 [META_ID(SK_ALLOCS)] = META_FUNC(int_sk_alloc), 516 [META_ID(SK_ALLOCS)] = META_FUNC(int_sk_alloc),
517 [META_ID(SK_ROUTE_CAPS)] = META_FUNC(int_sk_route_caps), 517 [META_ID(SK_ROUTE_CAPS)] = META_FUNC(int_sk_route_caps),
518 [META_ID(SK_HASHENT)] = META_FUNC(int_sk_hashent), 518 [META_ID(SK_HASH)] = META_FUNC(int_sk_hash),
519 [META_ID(SK_LINGERTIME)] = META_FUNC(int_sk_lingertime), 519 [META_ID(SK_LINGERTIME)] = META_FUNC(int_sk_lingertime),
520 [META_ID(SK_ACK_BACKLOG)] = META_FUNC(int_sk_ack_bl), 520 [META_ID(SK_ACK_BACKLOG)] = META_FUNC(int_sk_ack_bl),
521 [META_ID(SK_MAX_ACK_BACKLOG)] = META_FUNC(int_sk_max_ack_bl), 521 [META_ID(SK_MAX_ACK_BACKLOG)] = META_FUNC(int_sk_max_ack_bl),
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index e7025be77691..f01d1c9002a1 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -147,7 +147,7 @@ static void sctp_v4_copy_addrlist(struct list_head *addrlist,
147 struct sctp_sockaddr_entry *addr; 147 struct sctp_sockaddr_entry *addr;
148 148
149 rcu_read_lock(); 149 rcu_read_lock();
150 if ((in_dev = __in_dev_get(dev)) == NULL) { 150 if ((in_dev = __in_dev_get_rcu(dev)) == NULL) {
151 rcu_read_unlock(); 151 rcu_read_unlock();
152 return; 152 return;
153 } 153 }
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 86073df418f5..505c7de10c50 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -2414,6 +2414,17 @@ sctp_disposition_t sctp_sf_do_9_2_shutdown(const struct sctp_endpoint *ep,
2414 skb_pull(chunk->skb, sizeof(sctp_shutdownhdr_t)); 2414 skb_pull(chunk->skb, sizeof(sctp_shutdownhdr_t));
2415 chunk->subh.shutdown_hdr = sdh; 2415 chunk->subh.shutdown_hdr = sdh;
2416 2416
2417 /* API 5.3.1.5 SCTP_SHUTDOWN_EVENT
2418 * When a peer sends a SHUTDOWN, SCTP delivers this notification to
2419 * inform the application that it should cease sending data.
2420 */
2421 ev = sctp_ulpevent_make_shutdown_event(asoc, 0, GFP_ATOMIC);
2422 if (!ev) {
2423 disposition = SCTP_DISPOSITION_NOMEM;
2424 goto out;
2425 }
2426 sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(ev));
2427
2417 /* Upon the reception of the SHUTDOWN, the peer endpoint shall 2428 /* Upon the reception of the SHUTDOWN, the peer endpoint shall
2418 * - enter the SHUTDOWN-RECEIVED state, 2429 * - enter the SHUTDOWN-RECEIVED state,
2419 * - stop accepting new data from its SCTP user 2430 * - stop accepting new data from its SCTP user
@@ -2439,17 +2450,6 @@ sctp_disposition_t sctp_sf_do_9_2_shutdown(const struct sctp_endpoint *ep,
2439 sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_CTSN, 2450 sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_CTSN,
2440 SCTP_U32(chunk->subh.shutdown_hdr->cum_tsn_ack)); 2451 SCTP_U32(chunk->subh.shutdown_hdr->cum_tsn_ack));
2441 2452
2442 /* API 5.3.1.5 SCTP_SHUTDOWN_EVENT
2443 * When a peer sends a SHUTDOWN, SCTP delivers this notification to
2444 * inform the application that it should cease sending data.
2445 */
2446 ev = sctp_ulpevent_make_shutdown_event(asoc, 0, GFP_ATOMIC);
2447 if (!ev) {
2448 disposition = SCTP_DISPOSITION_NOMEM;
2449 goto out;
2450 }
2451 sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(ev));
2452
2453out: 2453out:
2454 return disposition; 2454 return disposition;
2455} 2455}
diff --git a/net/socket.c b/net/socket.c
index f9264472377f..3145103cdf54 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -1145,8 +1145,11 @@ static int __sock_create(int family, int type, int protocol, struct socket **res
1145 if (!try_module_get(net_families[family]->owner)) 1145 if (!try_module_get(net_families[family]->owner))
1146 goto out_release; 1146 goto out_release;
1147 1147
1148 if ((err = net_families[family]->create(sock, protocol)) < 0) 1148 if ((err = net_families[family]->create(sock, protocol)) < 0) {
1149 sock->ops = NULL;
1149 goto out_module_put; 1150 goto out_module_put;
1151 }
1152
1150 /* 1153 /*
1151 * Now to bump the refcnt of the [loadable] module that owns this 1154 * Now to bump the refcnt of the [loadable] module that owns this
1152 * socket at sock_release time we decrement its refcnt. 1155 * socket at sock_release time we decrement its refcnt.
@@ -1360,16 +1363,16 @@ asmlinkage long sys_accept(int fd, struct sockaddr __user *upeer_sockaddr, int _
1360 newsock->type = sock->type; 1363 newsock->type = sock->type;
1361 newsock->ops = sock->ops; 1364 newsock->ops = sock->ops;
1362 1365
1363 err = security_socket_accept(sock, newsock);
1364 if (err)
1365 goto out_release;
1366
1367 /* 1366 /*
1368 * We don't need try_module_get here, as the listening socket (sock) 1367 * We don't need try_module_get here, as the listening socket (sock)
1369 * has the protocol module (sock->ops->owner) held. 1368 * has the protocol module (sock->ops->owner) held.
1370 */ 1369 */
1371 __module_get(newsock->ops->owner); 1370 __module_get(newsock->ops->owner);
1372 1371
1372 err = security_socket_accept(sock, newsock);
1373 if (err)
1374 goto out_release;
1375
1373 err = sock->ops->accept(sock, newsock, sock->file->f_flags); 1376 err = sock->ops->accept(sock, newsock, sock->file->f_flags);
1374 if (err < 0) 1377 if (err < 0)
1375 goto out_release; 1378 goto out_release;
@@ -1700,7 +1703,9 @@ asmlinkage long sys_sendmsg(int fd, struct msghdr __user *msg, unsigned flags)
1700 struct socket *sock; 1703 struct socket *sock;
1701 char address[MAX_SOCK_ADDR]; 1704 char address[MAX_SOCK_ADDR];
1702 struct iovec iovstack[UIO_FASTIOV], *iov = iovstack; 1705 struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
1703 unsigned char ctl[sizeof(struct cmsghdr) + 20]; /* 20 is size of ipv6_pktinfo */ 1706 unsigned char ctl[sizeof(struct cmsghdr) + 20]
1707 __attribute__ ((aligned (sizeof(__kernel_size_t))));
1708 /* 20 is size of ipv6_pktinfo */
1704 unsigned char *ctl_buf = ctl; 1709 unsigned char *ctl_buf = ctl;
1705 struct msghdr msg_sys; 1710 struct msghdr msg_sys;
1706 int err, ctl_len, iov_size, total_len; 1711 int err, ctl_len, iov_size, total_len;
diff --git a/net/sysctl_net.c b/net/sysctl_net.c
index c5241fcbb966..55538f6b60ff 100644
--- a/net/sysctl_net.c
+++ b/net/sysctl_net.c
@@ -16,6 +16,8 @@
16#include <linux/mm.h> 16#include <linux/mm.h>
17#include <linux/sysctl.h> 17#include <linux/sysctl.h>
18 18
19#include <net/sock.h>
20
19#ifdef CONFIG_INET 21#ifdef CONFIG_INET
20#include <net/ip.h> 22#include <net/ip.h>
21#endif 23#endif
diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
index d8ee38aede26..f2ee673329a7 100644
--- a/scripts/mod/file2alias.c
+++ b/scripts/mod/file2alias.c
@@ -295,11 +295,13 @@ static int do_pcmcia_entry(const char *filename,
295{ 295{
296 unsigned int i; 296 unsigned int i;
297 297
298 id->match_flags = TO_NATIVE(id->match_flags);
298 id->manf_id = TO_NATIVE(id->manf_id); 299 id->manf_id = TO_NATIVE(id->manf_id);
299 id->card_id = TO_NATIVE(id->card_id); 300 id->card_id = TO_NATIVE(id->card_id);
300 id->func_id = TO_NATIVE(id->func_id); 301 id->func_id = TO_NATIVE(id->func_id);
301 id->function = TO_NATIVE(id->function); 302 id->function = TO_NATIVE(id->function);
302 id->device_no = TO_NATIVE(id->device_no); 303 id->device_no = TO_NATIVE(id->device_no);
304
303 for (i=0; i<4; i++) { 305 for (i=0; i<4; i++) {
304 id->prod_id_hash[i] = TO_NATIVE(id->prod_id_hash[i]); 306 id->prod_id_hash[i] = TO_NATIVE(id->prod_id_hash[i]);
305 } 307 }
diff --git a/security/keys/internal.h b/security/keys/internal.h
index 46c8602661c9..db99ed434f3a 100644
--- a/security/keys/internal.h
+++ b/security/keys/internal.h
@@ -71,26 +71,26 @@ extern void keyring_publish_name(struct key *keyring);
71 71
72extern int __key_link(struct key *keyring, struct key *key); 72extern int __key_link(struct key *keyring, struct key *key);
73 73
74extern struct key *__keyring_search_one(struct key *keyring, 74extern key_ref_t __keyring_search_one(key_ref_t keyring_ref,
75 const struct key_type *type, 75 const struct key_type *type,
76 const char *description, 76 const char *description,
77 key_perm_t perm); 77 key_perm_t perm);
78 78
79extern struct key *keyring_search_instkey(struct key *keyring, 79extern struct key *keyring_search_instkey(struct key *keyring,
80 key_serial_t target_id); 80 key_serial_t target_id);
81 81
82typedef int (*key_match_func_t)(const struct key *, const void *); 82typedef int (*key_match_func_t)(const struct key *, const void *);
83 83
84extern struct key *keyring_search_aux(struct key *keyring, 84extern key_ref_t keyring_search_aux(key_ref_t keyring_ref,
85 struct task_struct *tsk, 85 struct task_struct *tsk,
86 struct key_type *type, 86 struct key_type *type,
87 const void *description, 87 const void *description,
88 key_match_func_t match); 88 key_match_func_t match);
89 89
90extern struct key *search_process_keyrings(struct key_type *type, 90extern key_ref_t search_process_keyrings(struct key_type *type,
91 const void *description, 91 const void *description,
92 key_match_func_t match, 92 key_match_func_t match,
93 struct task_struct *tsk); 93 struct task_struct *tsk);
94 94
95extern struct key *find_keyring_by_name(const char *name, key_serial_t bound); 95extern struct key *find_keyring_by_name(const char *name, key_serial_t bound);
96 96
diff --git a/security/keys/key.c b/security/keys/key.c
index fb89f9844465..2182be9e9309 100644
--- a/security/keys/key.c
+++ b/security/keys/key.c
@@ -693,14 +693,15 @@ void key_type_put(struct key_type *ktype)
693 * - the key has an incremented refcount 693 * - the key has an incremented refcount
694 * - we need to put the key if we get an error 694 * - we need to put the key if we get an error
695 */ 695 */
696static inline struct key *__key_update(struct key *key, const void *payload, 696static inline key_ref_t __key_update(key_ref_t key_ref,
697 size_t plen) 697 const void *payload, size_t plen)
698{ 698{
699 struct key *key = key_ref_to_ptr(key_ref);
699 int ret; 700 int ret;
700 701
701 /* need write permission on the key to update it */ 702 /* need write permission on the key to update it */
702 ret = -EACCES; 703 ret = -EACCES;
703 if (!key_permission(key, KEY_WRITE)) 704 if (!key_permission(key_ref, KEY_WRITE))
704 goto error; 705 goto error;
705 706
706 ret = -EEXIST; 707 ret = -EEXIST;
@@ -719,12 +720,12 @@ static inline struct key *__key_update(struct key *key, const void *payload,
719 720
720 if (ret < 0) 721 if (ret < 0)
721 goto error; 722 goto error;
722 out: 723out:
723 return key; 724 return key_ref;
724 725
725 error: 726error:
726 key_put(key); 727 key_put(key);
727 key = ERR_PTR(ret); 728 key_ref = ERR_PTR(ret);
728 goto out; 729 goto out;
729 730
730} /* end __key_update() */ 731} /* end __key_update() */
@@ -734,52 +735,56 @@ static inline struct key *__key_update(struct key *key, const void *payload,
734 * search the specified keyring for a key of the same description; if one is 735 * search the specified keyring for a key of the same description; if one is
735 * found, update it, otherwise add a new one 736 * found, update it, otherwise add a new one
736 */ 737 */
737struct key *key_create_or_update(struct key *keyring, 738key_ref_t key_create_or_update(key_ref_t keyring_ref,
738 const char *type, 739 const char *type,
739 const char *description, 740 const char *description,
740 const void *payload, 741 const void *payload,
741 size_t plen, 742 size_t plen,
742 int not_in_quota) 743 int not_in_quota)
743{ 744{
744 struct key_type *ktype; 745 struct key_type *ktype;
745 struct key *key = NULL; 746 struct key *keyring, *key = NULL;
746 key_perm_t perm; 747 key_perm_t perm;
748 key_ref_t key_ref;
747 int ret; 749 int ret;
748 750
749 key_check(keyring);
750
751 /* look up the key type to see if it's one of the registered kernel 751 /* look up the key type to see if it's one of the registered kernel
752 * types */ 752 * types */
753 ktype = key_type_lookup(type); 753 ktype = key_type_lookup(type);
754 if (IS_ERR(ktype)) { 754 if (IS_ERR(ktype)) {
755 key = ERR_PTR(-ENODEV); 755 key_ref = ERR_PTR(-ENODEV);
756 goto error; 756 goto error;
757 } 757 }
758 758
759 ret = -EINVAL; 759 key_ref = ERR_PTR(-EINVAL);
760 if (!ktype->match || !ktype->instantiate) 760 if (!ktype->match || !ktype->instantiate)
761 goto error_2; 761 goto error_2;
762 762
763 keyring = key_ref_to_ptr(keyring_ref);
764
765 key_check(keyring);
766
767 down_write(&keyring->sem);
768
769 /* if we're going to allocate a new key, we're going to have
770 * to modify the keyring */
771 key_ref = ERR_PTR(-EACCES);
772 if (!key_permission(keyring_ref, KEY_WRITE))
773 goto error_3;
774
763 /* search for an existing key of the same type and description in the 775 /* search for an existing key of the same type and description in the
764 * destination keyring 776 * destination keyring
765 */ 777 */
766 down_write(&keyring->sem); 778 key_ref = __keyring_search_one(keyring_ref, ktype, description, 0);
767 779 if (!IS_ERR(key_ref))
768 key = __keyring_search_one(keyring, ktype, description, 0);
769 if (!IS_ERR(key))
770 goto found_matching_key; 780 goto found_matching_key;
771 781
772 /* if we're going to allocate a new key, we're going to have to modify
773 * the keyring */
774 ret = -EACCES;
775 if (!key_permission(keyring, KEY_WRITE))
776 goto error_3;
777
778 /* decide on the permissions we want */ 782 /* decide on the permissions we want */
779 perm = KEY_USR_VIEW | KEY_USR_SEARCH | KEY_USR_LINK; 783 perm = KEY_POS_VIEW | KEY_POS_SEARCH | KEY_POS_LINK;
784 perm |= KEY_USR_VIEW | KEY_USR_SEARCH | KEY_USR_LINK;
780 785
781 if (ktype->read) 786 if (ktype->read)
782 perm |= KEY_USR_READ; 787 perm |= KEY_POS_READ | KEY_USR_READ;
783 788
784 if (ktype == &key_type_keyring || ktype->update) 789 if (ktype == &key_type_keyring || ktype->update)
785 perm |= KEY_USR_WRITE; 790 perm |= KEY_USR_WRITE;
@@ -788,7 +793,7 @@ struct key *key_create_or_update(struct key *keyring,
788 key = key_alloc(ktype, description, current->fsuid, current->fsgid, 793 key = key_alloc(ktype, description, current->fsuid, current->fsgid,
789 perm, not_in_quota); 794 perm, not_in_quota);
790 if (IS_ERR(key)) { 795 if (IS_ERR(key)) {
791 ret = PTR_ERR(key); 796 key_ref = ERR_PTR(PTR_ERR(key));
792 goto error_3; 797 goto error_3;
793 } 798 }
794 799
@@ -796,15 +801,18 @@ struct key *key_create_or_update(struct key *keyring,
796 ret = __key_instantiate_and_link(key, payload, plen, keyring, NULL); 801 ret = __key_instantiate_and_link(key, payload, plen, keyring, NULL);
797 if (ret < 0) { 802 if (ret < 0) {
798 key_put(key); 803 key_put(key);
799 key = ERR_PTR(ret); 804 key_ref = ERR_PTR(ret);
805 goto error_3;
800 } 806 }
801 807
808 key_ref = make_key_ref(key, is_key_possessed(keyring_ref));
809
802 error_3: 810 error_3:
803 up_write(&keyring->sem); 811 up_write(&keyring->sem);
804 error_2: 812 error_2:
805 key_type_put(ktype); 813 key_type_put(ktype);
806 error: 814 error:
807 return key; 815 return key_ref;
808 816
809 found_matching_key: 817 found_matching_key:
810 /* we found a matching key, so we're going to try to update it 818 /* we found a matching key, so we're going to try to update it
@@ -813,7 +821,7 @@ struct key *key_create_or_update(struct key *keyring,
813 up_write(&keyring->sem); 821 up_write(&keyring->sem);
814 key_type_put(ktype); 822 key_type_put(ktype);
815 823
816 key = __key_update(key, payload, plen); 824 key_ref = __key_update(key_ref, payload, plen);
817 goto error; 825 goto error;
818 826
819} /* end key_create_or_update() */ 827} /* end key_create_or_update() */
@@ -824,15 +832,16 @@ EXPORT_SYMBOL(key_create_or_update);
824/* 832/*
825 * update a key 833 * update a key
826 */ 834 */
827int key_update(struct key *key, const void *payload, size_t plen) 835int key_update(key_ref_t key_ref, const void *payload, size_t plen)
828{ 836{
837 struct key *key = key_ref_to_ptr(key_ref);
829 int ret; 838 int ret;
830 839
831 key_check(key); 840 key_check(key);
832 841
833 /* the key must be writable */ 842 /* the key must be writable */
834 ret = -EACCES; 843 ret = -EACCES;
835 if (!key_permission(key, KEY_WRITE)) 844 if (!key_permission(key_ref, KEY_WRITE))
836 goto error; 845 goto error;
837 846
838 /* attempt to update it if supported */ 847 /* attempt to update it if supported */
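
The key.c and keyctl.c conversions replace bare struct key pointers with key_ref_t handles so that the caller's possession of the key travels with the reference and key_permission() can honour the new possessor bits (KEY_POS_*); make_key_ref(key, possessed) packs the pointer together with the possession flag. A sketch of the handle round-trip, built only from helpers the patch itself uses (key_ref_to_ptr, is_key_possessed, key_ref_put), assuming the ref was returned with a reference held, as lookup_user_key() does:

	static void example_key_ref_use(key_ref_t key_ref)
	{
		struct key *key = key_ref_to_ptr(key_ref);	/* recover the plain pointer */

		if (is_key_possessed(key_ref))
			printk(KERN_DEBUG "possessed key %d\n", key->serial);

		key_ref_put(key_ref);	/* drops the key reference, like key_put() */
	}
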
diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
index a6516a64b297..4c670ee6acf9 100644
--- a/security/keys/keyctl.c
+++ b/security/keys/keyctl.c
@@ -34,7 +34,7 @@ asmlinkage long sys_add_key(const char __user *_type,
34 size_t plen, 34 size_t plen,
35 key_serial_t ringid) 35 key_serial_t ringid)
36{ 36{
37 struct key *keyring, *key; 37 key_ref_t keyring_ref, key_ref;
38 char type[32], *description; 38 char type[32], *description;
39 void *payload; 39 void *payload;
40 long dlen, ret; 40 long dlen, ret;
@@ -86,25 +86,25 @@ asmlinkage long sys_add_key(const char __user *_type,
86 } 86 }
87 87
88 /* find the target keyring (which must be writable) */ 88 /* find the target keyring (which must be writable) */
89 keyring = lookup_user_key(NULL, ringid, 1, 0, KEY_WRITE); 89 keyring_ref = lookup_user_key(NULL, ringid, 1, 0, KEY_WRITE);
90 if (IS_ERR(keyring)) { 90 if (IS_ERR(keyring_ref)) {
91 ret = PTR_ERR(keyring); 91 ret = PTR_ERR(keyring_ref);
92 goto error3; 92 goto error3;
93 } 93 }
94 94
95 /* create or update the requested key and add it to the target 95 /* create or update the requested key and add it to the target
96 * keyring */ 96 * keyring */
97 key = key_create_or_update(keyring, type, description, 97 key_ref = key_create_or_update(keyring_ref, type, description,
98 payload, plen, 0); 98 payload, plen, 0);
99 if (!IS_ERR(key)) { 99 if (!IS_ERR(key_ref)) {
100 ret = key->serial; 100 ret = key_ref_to_ptr(key_ref)->serial;
101 key_put(key); 101 key_ref_put(key_ref);
102 } 102 }
103 else { 103 else {
104 ret = PTR_ERR(key); 104 ret = PTR_ERR(key_ref);
105 } 105 }
106 106
107 key_put(keyring); 107 key_ref_put(keyring_ref);
108 error3: 108 error3:
109 kfree(payload); 109 kfree(payload);
110 error2: 110 error2:
@@ -131,7 +131,8 @@ asmlinkage long sys_request_key(const char __user *_type,
131 key_serial_t destringid) 131 key_serial_t destringid)
132{ 132{
133 struct key_type *ktype; 133 struct key_type *ktype;
134 struct key *key, *dest; 134 struct key *key;
135 key_ref_t dest_ref;
135 char type[32], *description, *callout_info; 136 char type[32], *description, *callout_info;
136 long dlen, ret; 137 long dlen, ret;
137 138
@@ -187,11 +188,11 @@ asmlinkage long sys_request_key(const char __user *_type,
187 } 188 }
188 189
189 /* get the destination keyring if specified */ 190 /* get the destination keyring if specified */
190 dest = NULL; 191 dest_ref = NULL;
191 if (destringid) { 192 if (destringid) {
192 dest = lookup_user_key(NULL, destringid, 1, 0, KEY_WRITE); 193 dest_ref = lookup_user_key(NULL, destringid, 1, 0, KEY_WRITE);
193 if (IS_ERR(dest)) { 194 if (IS_ERR(dest_ref)) {
194 ret = PTR_ERR(dest); 195 ret = PTR_ERR(dest_ref);
195 goto error3; 196 goto error3;
196 } 197 }
197 } 198 }
@@ -204,7 +205,8 @@ asmlinkage long sys_request_key(const char __user *_type,
204 } 205 }
205 206
206 /* do the search */ 207 /* do the search */
207 key = request_key_and_link(ktype, description, callout_info, dest); 208 key = request_key_and_link(ktype, description, callout_info,
209 key_ref_to_ptr(dest_ref));
208 if (IS_ERR(key)) { 210 if (IS_ERR(key)) {
209 ret = PTR_ERR(key); 211 ret = PTR_ERR(key);
210 goto error5; 212 goto error5;
@@ -216,7 +218,7 @@ asmlinkage long sys_request_key(const char __user *_type,
216 error5: 218 error5:
217 key_type_put(ktype); 219 key_type_put(ktype);
218 error4: 220 error4:
219 key_put(dest); 221 key_ref_put(dest_ref);
220 error3: 222 error3:
221 kfree(callout_info); 223 kfree(callout_info);
222 error2: 224 error2:
@@ -234,17 +236,17 @@ asmlinkage long sys_request_key(const char __user *_type,
234 */ 236 */
235long keyctl_get_keyring_ID(key_serial_t id, int create) 237long keyctl_get_keyring_ID(key_serial_t id, int create)
236{ 238{
237 struct key *key; 239 key_ref_t key_ref;
238 long ret; 240 long ret;
239 241
240 key = lookup_user_key(NULL, id, create, 0, KEY_SEARCH); 242 key_ref = lookup_user_key(NULL, id, create, 0, KEY_SEARCH);
241 if (IS_ERR(key)) { 243 if (IS_ERR(key_ref)) {
242 ret = PTR_ERR(key); 244 ret = PTR_ERR(key_ref);
243 goto error; 245 goto error;
244 } 246 }
245 247
246 ret = key->serial; 248 ret = key_ref_to_ptr(key_ref)->serial;
247 key_put(key); 249 key_ref_put(key_ref);
248 error: 250 error:
249 return ret; 251 return ret;
250 252
@@ -302,7 +304,7 @@ long keyctl_update_key(key_serial_t id,
302 const void __user *_payload, 304 const void __user *_payload,
303 size_t plen) 305 size_t plen)
304{ 306{
305 struct key *key; 307 key_ref_t key_ref;
306 void *payload; 308 void *payload;
307 long ret; 309 long ret;
308 310
@@ -324,16 +326,16 @@ long keyctl_update_key(key_serial_t id,
324 } 326 }
325 327
326 /* find the target key (which must be writable) */ 328 /* find the target key (which must be writable) */
327 key = lookup_user_key(NULL, id, 0, 0, KEY_WRITE); 329 key_ref = lookup_user_key(NULL, id, 0, 0, KEY_WRITE);
328 if (IS_ERR(key)) { 330 if (IS_ERR(key_ref)) {
329 ret = PTR_ERR(key); 331 ret = PTR_ERR(key_ref);
330 goto error2; 332 goto error2;
331 } 333 }
332 334
333 /* update the key */ 335 /* update the key */
334 ret = key_update(key, payload, plen); 336 ret = key_update(key_ref, payload, plen);
335 337
336 key_put(key); 338 key_ref_put(key_ref);
337 error2: 339 error2:
338 kfree(payload); 340 kfree(payload);
339 error: 341 error:
@@ -349,19 +351,19 @@ long keyctl_update_key(key_serial_t id,
349 */ 351 */
350long keyctl_revoke_key(key_serial_t id) 352long keyctl_revoke_key(key_serial_t id)
351{ 353{
352 struct key *key; 354 key_ref_t key_ref;
353 long ret; 355 long ret;
354 356
355 key = lookup_user_key(NULL, id, 0, 0, KEY_WRITE); 357 key_ref = lookup_user_key(NULL, id, 0, 0, KEY_WRITE);
356 if (IS_ERR(key)) { 358 if (IS_ERR(key_ref)) {
357 ret = PTR_ERR(key); 359 ret = PTR_ERR(key_ref);
358 goto error; 360 goto error;
359 } 361 }
360 362
361 key_revoke(key); 363 key_revoke(key_ref_to_ptr(key_ref));
362 ret = 0; 364 ret = 0;
363 365
364 key_put(key); 366 key_ref_put(key_ref);
365 error: 367 error:
366 return ret; 368 return ret;
367 369
@@ -375,18 +377,18 @@ long keyctl_revoke_key(key_serial_t id)
375 */ 377 */
376long keyctl_keyring_clear(key_serial_t ringid) 378long keyctl_keyring_clear(key_serial_t ringid)
377{ 379{
378 struct key *keyring; 380 key_ref_t keyring_ref;
379 long ret; 381 long ret;
380 382
381 keyring = lookup_user_key(NULL, ringid, 1, 0, KEY_WRITE); 383 keyring_ref = lookup_user_key(NULL, ringid, 1, 0, KEY_WRITE);
382 if (IS_ERR(keyring)) { 384 if (IS_ERR(keyring_ref)) {
383 ret = PTR_ERR(keyring); 385 ret = PTR_ERR(keyring_ref);
384 goto error; 386 goto error;
385 } 387 }
386 388
387 ret = keyring_clear(keyring); 389 ret = keyring_clear(key_ref_to_ptr(keyring_ref));
388 390
389 key_put(keyring); 391 key_ref_put(keyring_ref);
390 error: 392 error:
391 return ret; 393 return ret;
392 394
@@ -401,26 +403,26 @@ long keyctl_keyring_clear(key_serial_t ringid)
401 */ 403 */
402long keyctl_keyring_link(key_serial_t id, key_serial_t ringid) 404long keyctl_keyring_link(key_serial_t id, key_serial_t ringid)
403{ 405{
404 struct key *keyring, *key; 406 key_ref_t keyring_ref, key_ref;
405 long ret; 407 long ret;
406 408
407 keyring = lookup_user_key(NULL, ringid, 1, 0, KEY_WRITE); 409 keyring_ref = lookup_user_key(NULL, ringid, 1, 0, KEY_WRITE);
408 if (IS_ERR(keyring)) { 410 if (IS_ERR(keyring_ref)) {
409 ret = PTR_ERR(keyring); 411 ret = PTR_ERR(keyring_ref);
410 goto error; 412 goto error;
411 } 413 }
412 414
413 key = lookup_user_key(NULL, id, 1, 0, KEY_LINK); 415 key_ref = lookup_user_key(NULL, id, 1, 0, KEY_LINK);
414 if (IS_ERR(key)) { 416 if (IS_ERR(key_ref)) {
415 ret = PTR_ERR(key); 417 ret = PTR_ERR(key_ref);
416 goto error2; 418 goto error2;
417 } 419 }
418 420
419 ret = key_link(keyring, key); 421 ret = key_link(key_ref_to_ptr(keyring_ref), key_ref_to_ptr(key_ref));
420 422
421 key_put(key); 423 key_ref_put(key_ref);
422 error2: 424 error2:
423 key_put(keyring); 425 key_ref_put(keyring_ref);
424 error: 426 error:
425 return ret; 427 return ret;
426 428
@@ -435,26 +437,26 @@ long keyctl_keyring_link(key_serial_t id, key_serial_t ringid)
435 */ 437 */
436long keyctl_keyring_unlink(key_serial_t id, key_serial_t ringid) 438long keyctl_keyring_unlink(key_serial_t id, key_serial_t ringid)
437{ 439{
438 struct key *keyring, *key; 440 key_ref_t keyring_ref, key_ref;
439 long ret; 441 long ret;
440 442
441 keyring = lookup_user_key(NULL, ringid, 0, 0, KEY_WRITE); 443 keyring_ref = lookup_user_key(NULL, ringid, 0, 0, KEY_WRITE);
442 if (IS_ERR(keyring)) { 444 if (IS_ERR(keyring_ref)) {
443 ret = PTR_ERR(keyring); 445 ret = PTR_ERR(keyring_ref);
444 goto error; 446 goto error;
445 } 447 }
446 448
447 key = lookup_user_key(NULL, id, 0, 0, 0); 449 key_ref = lookup_user_key(NULL, id, 0, 0, 0);
448 if (IS_ERR(key)) { 450 if (IS_ERR(key_ref)) {
449 ret = PTR_ERR(key); 451 ret = PTR_ERR(key_ref);
450 goto error2; 452 goto error2;
451 } 453 }
452 454
453 ret = key_unlink(keyring, key); 455 ret = key_unlink(key_ref_to_ptr(keyring_ref), key_ref_to_ptr(key_ref));
454 456
455 key_put(key); 457 key_ref_put(key_ref);
456 error2: 458 error2:
457 key_put(keyring); 459 key_ref_put(keyring_ref);
458 error: 460 error:
459 return ret; 461 return ret;
460 462
@@ -476,24 +478,26 @@ long keyctl_describe_key(key_serial_t keyid,
476 size_t buflen) 478 size_t buflen)
477{ 479{
478 struct key *key, *instkey; 480 struct key *key, *instkey;
481 key_ref_t key_ref;
479 char *tmpbuf; 482 char *tmpbuf;
480 long ret; 483 long ret;
481 484
482 key = lookup_user_key(NULL, keyid, 0, 1, KEY_VIEW); 485 key_ref = lookup_user_key(NULL, keyid, 0, 1, KEY_VIEW);
483 if (IS_ERR(key)) { 486 if (IS_ERR(key_ref)) {
484 /* viewing a key under construction is permitted if we have the 487 /* viewing a key under construction is permitted if we have the
485 * authorisation token handy */ 488 * authorisation token handy */
486 if (PTR_ERR(key) == -EACCES) { 489 if (PTR_ERR(key_ref) == -EACCES) {
487 instkey = key_get_instantiation_authkey(keyid); 490 instkey = key_get_instantiation_authkey(keyid);
488 if (!IS_ERR(instkey)) { 491 if (!IS_ERR(instkey)) {
489 key_put(instkey); 492 key_put(instkey);
490 key = lookup_user_key(NULL, keyid, 0, 1, 0); 493 key_ref = lookup_user_key(NULL, keyid,
491 if (!IS_ERR(key)) 494 0, 1, 0);
495 if (!IS_ERR(key_ref))
492 goto okay; 496 goto okay;
493 } 497 }
494 } 498 }
495 499
496 ret = PTR_ERR(key); 500 ret = PTR_ERR(key_ref);
497 goto error; 501 goto error;
498 } 502 }
499 503
@@ -504,13 +508,16 @@ okay:
504 if (!tmpbuf) 508 if (!tmpbuf)
505 goto error2; 509 goto error2;
506 510
511 key = key_ref_to_ptr(key_ref);
512
507 ret = snprintf(tmpbuf, PAGE_SIZE - 1, 513 ret = snprintf(tmpbuf, PAGE_SIZE - 1,
508 "%s;%d;%d;%06x;%s", 514 "%s;%d;%d;%08x;%s",
509 key->type->name, 515 key_ref_to_ptr(key_ref)->type->name,
510 key->uid, 516 key_ref_to_ptr(key_ref)->uid,
511 key->gid, 517 key_ref_to_ptr(key_ref)->gid,
512 key->perm, 518 key_ref_to_ptr(key_ref)->perm,
513 key->description ? key->description :"" 519 key_ref_to_ptr(key_ref)->description ?
520 key_ref_to_ptr(key_ref)->description : ""
514 ); 521 );
515 522
516 /* include a NUL char at the end of the data */ 523 /* include a NUL char at the end of the data */
@@ -530,7 +537,7 @@ okay:
530 537
531 kfree(tmpbuf); 538 kfree(tmpbuf);
532 error2: 539 error2:
533 key_put(key); 540 key_ref_put(key_ref);
534 error: 541 error:
535 return ret; 542 return ret;
536 543
@@ -552,7 +559,7 @@ long keyctl_keyring_search(key_serial_t ringid,
552 key_serial_t destringid) 559 key_serial_t destringid)
553{ 560{
554 struct key_type *ktype; 561 struct key_type *ktype;
555 struct key *keyring, *key, *dest; 562 key_ref_t keyring_ref, key_ref, dest_ref;
556 char type[32], *description; 563 char type[32], *description;
557 long dlen, ret; 564 long dlen, ret;
558 565
@@ -581,18 +588,18 @@ long keyctl_keyring_search(key_serial_t ringid,
581 goto error2; 588 goto error2;
582 589
583 /* get the keyring at which to begin the search */ 590 /* get the keyring at which to begin the search */
584 keyring = lookup_user_key(NULL, ringid, 0, 0, KEY_SEARCH); 591 keyring_ref = lookup_user_key(NULL, ringid, 0, 0, KEY_SEARCH);
585 if (IS_ERR(keyring)) { 592 if (IS_ERR(keyring_ref)) {
586 ret = PTR_ERR(keyring); 593 ret = PTR_ERR(keyring_ref);
587 goto error2; 594 goto error2;
588 } 595 }
589 596
590 /* get the destination keyring if specified */ 597 /* get the destination keyring if specified */
591 dest = NULL; 598 dest_ref = NULL;
592 if (destringid) { 599 if (destringid) {
593 dest = lookup_user_key(NULL, destringid, 1, 0, KEY_WRITE); 600 dest_ref = lookup_user_key(NULL, destringid, 1, 0, KEY_WRITE);
594 if (IS_ERR(dest)) { 601 if (IS_ERR(dest_ref)) {
595 ret = PTR_ERR(dest); 602 ret = PTR_ERR(dest_ref);
596 goto error3; 603 goto error3;
597 } 604 }
598 } 605 }
@@ -605,9 +612,9 @@ long keyctl_keyring_search(key_serial_t ringid,
605 } 612 }
606 613
607 /* do the search */ 614 /* do the search */
608 key = keyring_search(keyring, ktype, description); 615 key_ref = keyring_search(keyring_ref, ktype, description);
609 if (IS_ERR(key)) { 616 if (IS_ERR(key_ref)) {
610 ret = PTR_ERR(key); 617 ret = PTR_ERR(key_ref);
611 618
612 /* treat lack or presence of a negative key the same */ 619 /* treat lack or presence of a negative key the same */
613 if (ret == -EAGAIN) 620 if (ret == -EAGAIN)
@@ -616,26 +623,26 @@ long keyctl_keyring_search(key_serial_t ringid,
616 } 623 }
617 624
618 /* link the resulting key to the destination keyring if we can */ 625 /* link the resulting key to the destination keyring if we can */
619 if (dest) { 626 if (dest_ref) {
620 ret = -EACCES; 627 ret = -EACCES;
621 if (!key_permission(key, KEY_LINK)) 628 if (!key_permission(key_ref, KEY_LINK))
622 goto error6; 629 goto error6;
623 630
624 ret = key_link(dest, key); 631 ret = key_link(key_ref_to_ptr(dest_ref), key_ref_to_ptr(key_ref));
625 if (ret < 0) 632 if (ret < 0)
626 goto error6; 633 goto error6;
627 } 634 }
628 635
629 ret = key->serial; 636 ret = key_ref_to_ptr(key_ref)->serial;
630 637
631 error6: 638 error6:
632 key_put(key); 639 key_ref_put(key_ref);
633 error5: 640 error5:
634 key_type_put(ktype); 641 key_type_put(ktype);
635 error4: 642 error4:
636 key_put(dest); 643 key_ref_put(dest_ref);
637 error3: 644 error3:
638 key_put(keyring); 645 key_ref_put(keyring_ref);
639 error2: 646 error2:
640 kfree(description); 647 kfree(description);
641 error: 648 error:
@@ -645,16 +652,6 @@ long keyctl_keyring_search(key_serial_t ringid,
645 652
646/*****************************************************************************/ 653/*****************************************************************************/
647/* 654/*
648 * see if the key we're looking at is the target key
649 */
650static int keyctl_read_key_same(const struct key *key, const void *target)
651{
652 return key == target;
653
654} /* end keyctl_read_key_same() */
655
656/*****************************************************************************/
657/*
658 * read a user key's payload 655 * read a user key's payload
659 * - the keyring must be readable or the key must be searchable from the 656 * - the keyring must be readable or the key must be searchable from the
660 * process's keyrings 657 * process's keyrings
@@ -665,38 +662,33 @@ static int keyctl_read_key_same(const struct key *key, const void *target)
665 */ 662 */
666long keyctl_read_key(key_serial_t keyid, char __user *buffer, size_t buflen) 663long keyctl_read_key(key_serial_t keyid, char __user *buffer, size_t buflen)
667{ 664{
668 struct key *key, *skey; 665 struct key *key;
666 key_ref_t key_ref;
669 long ret; 667 long ret;
670 668
671 /* find the key first */ 669 /* find the key first */
672 key = lookup_user_key(NULL, keyid, 0, 0, 0); 670 key_ref = lookup_user_key(NULL, keyid, 0, 0, 0);
673 if (!IS_ERR(key)) { 671 if (IS_ERR(key_ref)) {
674 /* see if we can read it directly */ 672 ret = -ENOKEY;
675 if (key_permission(key, KEY_READ)) 673 goto error;
676 goto can_read_key;
677
678 /* we can't; see if it's searchable from this process's
679 * keyrings
680 * - we automatically take account of the fact that it may be
681 * dangling off an instantiation key
682 */
683 skey = search_process_keyrings(key->type, key,
684 keyctl_read_key_same, current);
685 if (!IS_ERR(skey))
686 goto can_read_key2;
687
688 ret = PTR_ERR(skey);
689 if (ret == -EAGAIN)
690 ret = -EACCES;
691 goto error2;
692 } 674 }
693 675
694 ret = -ENOKEY; 676 key = key_ref_to_ptr(key_ref);
695 goto error; 677
678 /* see if we can read it directly */
679 if (key_permission(key_ref, KEY_READ))
680 goto can_read_key;
681
682 /* we can't; see if it's searchable from this process's keyrings
683 * - we automatically take account of the fact that it may be
684 * dangling off an instantiation key
685 */
686 if (!is_key_possessed(key_ref)) {
687 ret = -EACCES;
688 goto error2;
689 }
696 690
697 /* the key is probably readable - now try to read it */ 691 /* the key is probably readable - now try to read it */
698 can_read_key2:
699 key_put(skey);
700 can_read_key: 692 can_read_key:
701 ret = key_validate(key); 693 ret = key_validate(key);
702 if (ret == 0) { 694 if (ret == 0) {
@@ -727,18 +719,21 @@ long keyctl_read_key(key_serial_t keyid, char __user *buffer, size_t buflen)
727long keyctl_chown_key(key_serial_t id, uid_t uid, gid_t gid) 719long keyctl_chown_key(key_serial_t id, uid_t uid, gid_t gid)
728{ 720{
729 struct key *key; 721 struct key *key;
722 key_ref_t key_ref;
730 long ret; 723 long ret;
731 724
732 ret = 0; 725 ret = 0;
733 if (uid == (uid_t) -1 && gid == (gid_t) -1) 726 if (uid == (uid_t) -1 && gid == (gid_t) -1)
734 goto error; 727 goto error;
735 728
736 key = lookup_user_key(NULL, id, 1, 1, 0); 729 key_ref = lookup_user_key(NULL, id, 1, 1, 0);
737 if (IS_ERR(key)) { 730 if (IS_ERR(key_ref)) {
738 ret = PTR_ERR(key); 731 ret = PTR_ERR(key_ref);
739 goto error; 732 goto error;
740 } 733 }
741 734
735 key = key_ref_to_ptr(key_ref);
736
742 /* make the changes with the locks held to prevent chown/chown races */ 737 /* make the changes with the locks held to prevent chown/chown races */
743 ret = -EACCES; 738 ret = -EACCES;
744 down_write(&key->sem); 739 down_write(&key->sem);
@@ -784,18 +779,21 @@ long keyctl_chown_key(key_serial_t id, uid_t uid, gid_t gid)
784long keyctl_setperm_key(key_serial_t id, key_perm_t perm) 779long keyctl_setperm_key(key_serial_t id, key_perm_t perm)
785{ 780{
786 struct key *key; 781 struct key *key;
782 key_ref_t key_ref;
787 long ret; 783 long ret;
788 784
789 ret = -EINVAL; 785 ret = -EINVAL;
790 if (perm & ~(KEY_USR_ALL | KEY_GRP_ALL | KEY_OTH_ALL)) 786 if (perm & ~(KEY_POS_ALL | KEY_USR_ALL | KEY_GRP_ALL | KEY_OTH_ALL))
791 goto error; 787 goto error;
792 788
793 key = lookup_user_key(NULL, id, 1, 1, 0); 789 key_ref = lookup_user_key(NULL, id, 1, 1, 0);
794 if (IS_ERR(key)) { 790 if (IS_ERR(key_ref)) {
795 ret = PTR_ERR(key); 791 ret = PTR_ERR(key_ref);
796 goto error; 792 goto error;
797 } 793 }
798 794
795 key = key_ref_to_ptr(key_ref);
796
799 /* make the changes with the locks held to prevent chown/chmod races */ 797 /* make the changes with the locks held to prevent chown/chmod races */
800 ret = -EACCES; 798 ret = -EACCES;
801 down_write(&key->sem); 799 down_write(&key->sem);
@@ -824,7 +822,8 @@ long keyctl_instantiate_key(key_serial_t id,
824 key_serial_t ringid) 822 key_serial_t ringid)
825{ 823{
826 struct request_key_auth *rka; 824 struct request_key_auth *rka;
827 struct key *instkey, *keyring; 825 struct key *instkey;
826 key_ref_t keyring_ref;
828 void *payload; 827 void *payload;
829 long ret; 828 long ret;
830 829
@@ -857,21 +856,21 @@ long keyctl_instantiate_key(key_serial_t id,
857 856
858 /* find the destination keyring amongst those belonging to the 857 /* find the destination keyring amongst those belonging to the
859 * requesting task */ 858 * requesting task */
860 keyring = NULL; 859 keyring_ref = NULL;
861 if (ringid) { 860 if (ringid) {
862 keyring = lookup_user_key(rka->context, ringid, 1, 0, 861 keyring_ref = lookup_user_key(rka->context, ringid, 1, 0,
863 KEY_WRITE); 862 KEY_WRITE);
864 if (IS_ERR(keyring)) { 863 if (IS_ERR(keyring_ref)) {
865 ret = PTR_ERR(keyring); 864 ret = PTR_ERR(keyring_ref);
866 goto error3; 865 goto error3;
867 } 866 }
868 } 867 }
869 868
870 /* instantiate the key and link it into a keyring */ 869 /* instantiate the key and link it into a keyring */
871 ret = key_instantiate_and_link(rka->target_key, payload, plen, 870 ret = key_instantiate_and_link(rka->target_key, payload, plen,
872 keyring, instkey); 871 key_ref_to_ptr(keyring_ref), instkey);
873 872
874 key_put(keyring); 873 key_ref_put(keyring_ref);
875 error3: 874 error3:
876 key_put(instkey); 875 key_put(instkey);
877 error2: 876 error2:
@@ -889,7 +888,8 @@ long keyctl_instantiate_key(key_serial_t id,
889long keyctl_negate_key(key_serial_t id, unsigned timeout, key_serial_t ringid) 888long keyctl_negate_key(key_serial_t id, unsigned timeout, key_serial_t ringid)
890{ 889{
891 struct request_key_auth *rka; 890 struct request_key_auth *rka;
892 struct key *instkey, *keyring; 891 struct key *instkey;
892 key_ref_t keyring_ref;
893 long ret; 893 long ret;
894 894
895 /* find the instantiation authorisation key */ 895 /* find the instantiation authorisation key */
@@ -903,19 +903,20 @@ long keyctl_negate_key(key_serial_t id, unsigned timeout, key_serial_t ringid)
903 903
904 /* find the destination keyring if present (which must also be 904 /* find the destination keyring if present (which must also be
905 * writable) */ 905 * writable) */
906 keyring = NULL; 906 keyring_ref = NULL;
907 if (ringid) { 907 if (ringid) {
908 keyring = lookup_user_key(NULL, ringid, 1, 0, KEY_WRITE); 908 keyring_ref = lookup_user_key(NULL, ringid, 1, 0, KEY_WRITE);
909 if (IS_ERR(keyring)) { 909 if (IS_ERR(keyring_ref)) {
910 ret = PTR_ERR(keyring); 910 ret = PTR_ERR(keyring_ref);
911 goto error2; 911 goto error2;
912 } 912 }
913 } 913 }
914 914
915 /* instantiate the key and link it into a keyring */ 915 /* instantiate the key and link it into a keyring */
916 ret = key_negate_and_link(rka->target_key, timeout, keyring, instkey); 916 ret = key_negate_and_link(rka->target_key, timeout,
917 key_ref_to_ptr(keyring_ref), instkey);
917 918
918 key_put(keyring); 919 key_ref_put(keyring_ref);
919 error2: 920 error2:
920 key_put(instkey); 921 key_put(instkey);
921 error: 922 error:
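
Note: the keyctl.c hunks above convert the syscall handlers from bare struct key pointers to key_ref_t, unwrapping with key_ref_to_ptr() only where a plain pointer is needed and releasing with key_ref_put(). For orientation, a key_ref_t can carry the "possessed" attribute in the low bit of a suitably aligned key pointer; the sketch below follows the accessor names used in the diff (make_key_ref, key_ref_to_ptr, is_key_possessed) and is only an illustration of the encoding, the authoritative definitions being in include/linux/key.h.

/* Sketch: a key reference carrying a possession flag in bit 0 of an
 * aligned struct key pointer.  Illustrative only; see include/linux/key.h
 * for the in-tree definitions. */
typedef struct __key_reference_with_attributes *key_ref_t;

static inline key_ref_t make_key_ref(const struct key *key,
				     unsigned long possession)
{
	/* pack the pointer and the possession bit together */
	return (key_ref_t) ((unsigned long) key | possession);
}

static inline struct key *key_ref_to_ptr(const key_ref_t key_ref)
{
	/* strip the possession bit to recover the plain key pointer */
	return (struct key *) ((unsigned long) key_ref & ~1UL);
}

static inline unsigned long is_key_possessed(const key_ref_t key_ref)
{
	return (unsigned long) key_ref & 1UL;
}

key_ref_put() is then simply key_put() on the unwrapped pointer.
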
diff --git a/security/keys/keyring.c b/security/keys/keyring.c
index 9c208c756df8..0639396dd441 100644
--- a/security/keys/keyring.c
+++ b/security/keys/keyring.c
@@ -309,7 +309,7 @@ struct key *keyring_alloc(const char *description, uid_t uid, gid_t gid,
309 int ret; 309 int ret;
310 310
311 keyring = key_alloc(&key_type_keyring, description, 311 keyring = key_alloc(&key_type_keyring, description,
312 uid, gid, KEY_USR_ALL, not_in_quota); 312 uid, gid, KEY_POS_ALL | KEY_USR_ALL, not_in_quota);
313 313
314 if (!IS_ERR(keyring)) { 314 if (!IS_ERR(keyring)) {
315 ret = key_instantiate_and_link(keyring, NULL, 0, dest, NULL); 315 ret = key_instantiate_and_link(keyring, NULL, 0, dest, NULL);
@@ -333,12 +333,13 @@ struct key *keyring_alloc(const char *description, uid_t uid, gid_t gid,
333 * - we rely on RCU to prevent the keyring lists from disappearing on us 333 * - we rely on RCU to prevent the keyring lists from disappearing on us
334 * - we return -EAGAIN if we didn't find any matching key 334 * - we return -EAGAIN if we didn't find any matching key
335 * - we return -ENOKEY if we only found negative matching keys 335 * - we return -ENOKEY if we only found negative matching keys
336 * - we propagate the possession attribute from the keyring ref to the key ref
336 */ 337 */
337struct key *keyring_search_aux(struct key *keyring, 338key_ref_t keyring_search_aux(key_ref_t keyring_ref,
338 struct task_struct *context, 339 struct task_struct *context,
339 struct key_type *type, 340 struct key_type *type,
340 const void *description, 341 const void *description,
341 key_match_func_t match) 342 key_match_func_t match)
342{ 343{
343 struct { 344 struct {
344 struct keyring_list *keylist; 345 struct keyring_list *keylist;
@@ -347,29 +348,33 @@ struct key *keyring_search_aux(struct key *keyring,
347 348
348 struct keyring_list *keylist; 349 struct keyring_list *keylist;
349 struct timespec now; 350 struct timespec now;
350 struct key *key; 351 unsigned long possessed;
352 struct key *keyring, *key;
353 key_ref_t key_ref;
351 long err; 354 long err;
352 int sp, kix; 355 int sp, kix;
353 356
357 keyring = key_ref_to_ptr(keyring_ref);
358 possessed = is_key_possessed(keyring_ref);
354 key_check(keyring); 359 key_check(keyring);
355 360
356 rcu_read_lock();
357
358 /* top keyring must have search permission to begin the search */ 361 /* top keyring must have search permission to begin the search */
359 key = ERR_PTR(-EACCES); 362 key_ref = ERR_PTR(-EACCES);
360 if (!key_task_permission(keyring, context, KEY_SEARCH)) 363 if (!key_task_permission(keyring_ref, context, KEY_SEARCH))
361 goto error; 364 goto error;
362 365
363 key = ERR_PTR(-ENOTDIR); 366 key_ref = ERR_PTR(-ENOTDIR);
364 if (keyring->type != &key_type_keyring) 367 if (keyring->type != &key_type_keyring)
365 goto error; 368 goto error;
366 369
370 rcu_read_lock();
371
367 now = current_kernel_time(); 372 now = current_kernel_time();
368 err = -EAGAIN; 373 err = -EAGAIN;
369 sp = 0; 374 sp = 0;
370 375
371 /* start processing a new keyring */ 376 /* start processing a new keyring */
372 descend: 377descend:
373 if (test_bit(KEY_FLAG_REVOKED, &keyring->flags)) 378 if (test_bit(KEY_FLAG_REVOKED, &keyring->flags))
374 goto not_this_keyring; 379 goto not_this_keyring;
375 380
@@ -397,7 +402,8 @@ struct key *keyring_search_aux(struct key *keyring,
397 continue; 402 continue;
398 403
399 /* key must have search permissions */ 404 /* key must have search permissions */
400 if (!key_task_permission(key, context, KEY_SEARCH)) 405 if (!key_task_permission(make_key_ref(key, possessed),
406 context, KEY_SEARCH))
401 continue; 407 continue;
402 408
403 /* we set a different error code if we find a negative key */ 409 /* we set a different error code if we find a negative key */
@@ -411,7 +417,7 @@ struct key *keyring_search_aux(struct key *keyring,
411 417
412 /* search through the keyrings nested in this one */ 418 /* search through the keyrings nested in this one */
413 kix = 0; 419 kix = 0;
414 ascend: 420ascend:
415 for (; kix < keylist->nkeys; kix++) { 421 for (; kix < keylist->nkeys; kix++) {
416 key = keylist->keys[kix]; 422 key = keylist->keys[kix];
417 if (key->type != &key_type_keyring) 423 if (key->type != &key_type_keyring)
@@ -423,7 +429,8 @@ struct key *keyring_search_aux(struct key *keyring,
423 if (sp >= KEYRING_SEARCH_MAX_DEPTH) 429 if (sp >= KEYRING_SEARCH_MAX_DEPTH)
424 continue; 430 continue;
425 431
426 if (!key_task_permission(key, context, KEY_SEARCH)) 432 if (!key_task_permission(make_key_ref(key, possessed),
433 context, KEY_SEARCH))
427 continue; 434 continue;
428 435
429 /* stack the current position */ 436 /* stack the current position */
@@ -438,7 +445,7 @@ struct key *keyring_search_aux(struct key *keyring,
438 445
439 /* the keyring we're looking at was disqualified or didn't contain a 446 /* the keyring we're looking at was disqualified or didn't contain a
440 * matching key */ 447 * matching key */
441 not_this_keyring: 448not_this_keyring:
442 if (sp > 0) { 449 if (sp > 0) {
443 /* resume the processing of a keyring higher up in the tree */ 450 /* resume the processing of a keyring higher up in the tree */
444 sp--; 451 sp--;
@@ -447,16 +454,18 @@ struct key *keyring_search_aux(struct key *keyring,
447 goto ascend; 454 goto ascend;
448 } 455 }
449 456
450 key = ERR_PTR(err); 457 key_ref = ERR_PTR(err);
451 goto error; 458 goto error_2;
452 459
453 /* we found a viable match */ 460 /* we found a viable match */
454 found: 461found:
455 atomic_inc(&key->usage); 462 atomic_inc(&key->usage);
456 key_check(key); 463 key_check(key);
457 error: 464 key_ref = make_key_ref(key, possessed);
465error_2:
458 rcu_read_unlock(); 466 rcu_read_unlock();
459 return key; 467error:
468 return key_ref;
460 469
461} /* end keyring_search_aux() */ 470} /* end keyring_search_aux() */
462 471
@@ -469,9 +478,9 @@ struct key *keyring_search_aux(struct key *keyring,
469 * - we return -EAGAIN if we didn't find any matching key 478 * - we return -EAGAIN if we didn't find any matching key
470 * - we return -ENOKEY if we only found negative matching keys 479 * - we return -ENOKEY if we only found negative matching keys
471 */ 480 */
472struct key *keyring_search(struct key *keyring, 481key_ref_t keyring_search(key_ref_t keyring,
473 struct key_type *type, 482 struct key_type *type,
474 const char *description) 483 const char *description)
475{ 484{
476 if (!type->match) 485 if (!type->match)
477 return ERR_PTR(-ENOKEY); 486 return ERR_PTR(-ENOKEY);
@@ -488,15 +497,19 @@ EXPORT_SYMBOL(keyring_search);
488 * search the given keyring only (no recursion) 497 * search the given keyring only (no recursion)
489 * - keyring must be locked by caller 498 * - keyring must be locked by caller
490 */ 499 */
491struct key *__keyring_search_one(struct key *keyring, 500key_ref_t __keyring_search_one(key_ref_t keyring_ref,
492 const struct key_type *ktype, 501 const struct key_type *ktype,
493 const char *description, 502 const char *description,
494 key_perm_t perm) 503 key_perm_t perm)
495{ 504{
496 struct keyring_list *klist; 505 struct keyring_list *klist;
497 struct key *key; 506 unsigned long possessed;
507 struct key *keyring, *key;
498 int loop; 508 int loop;
499 509
510 keyring = key_ref_to_ptr(keyring_ref);
511 possessed = is_key_possessed(keyring_ref);
512
500 rcu_read_lock(); 513 rcu_read_lock();
501 514
502 klist = rcu_dereference(keyring->payload.subscriptions); 515 klist = rcu_dereference(keyring->payload.subscriptions);
@@ -507,21 +520,21 @@ struct key *__keyring_search_one(struct key *keyring,
507 if (key->type == ktype && 520 if (key->type == ktype &&
508 (!key->type->match || 521 (!key->type->match ||
509 key->type->match(key, description)) && 522 key->type->match(key, description)) &&
510 key_permission(key, perm) && 523 key_permission(make_key_ref(key, possessed),
524 perm) &&
511 !test_bit(KEY_FLAG_REVOKED, &key->flags) 525 !test_bit(KEY_FLAG_REVOKED, &key->flags)
512 ) 526 )
513 goto found; 527 goto found;
514 } 528 }
515 } 529 }
516 530
517 key = ERR_PTR(-ENOKEY); 531 rcu_read_unlock();
518 goto error; 532 return ERR_PTR(-ENOKEY);
519 533
520 found: 534 found:
521 atomic_inc(&key->usage); 535 atomic_inc(&key->usage);
522 error:
523 rcu_read_unlock(); 536 rcu_read_unlock();
524 return key; 537 return make_key_ref(key, possessed);
525 538
526} /* end __keyring_search_one() */ 539} /* end __keyring_search_one() */
527 540
@@ -603,7 +616,8 @@ struct key *find_keyring_by_name(const char *name, key_serial_t bound)
603 if (strcmp(keyring->description, name) != 0) 616 if (strcmp(keyring->description, name) != 0)
604 continue; 617 continue;
605 618
606 if (!key_permission(keyring, KEY_SEARCH)) 619 if (!key_permission(make_key_ref(keyring, 0),
620 KEY_SEARCH))
607 continue; 621 continue;
608 622
609 /* found a potential candidate, but we still need to 623 /* found a potential candidate, but we still need to
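
Note: keyring_search_aux() and __keyring_search_one() now take and return key_ref_t, and the possession attribute of the starting keyring is carried into every key_task_permission()/key_permission() check and into the reference that is returned. A minimal usage sketch of the new keyring_search() interface follows; my_keyring, the user-defined key type and the description are illustrative, and error handling is trimmed.

/* Sketch: searching a keyring the caller possesses.  The possession
 * bit set by make_key_ref(..., 1) is propagated to the permission
 * checks and to the reference that comes back. */
key_ref_t kref;

kref = keyring_search(make_key_ref(my_keyring, 1),
		      &key_type_user, "example:desc");
if (!IS_ERR(kref)) {
	struct key *key = key_ref_to_ptr(kref);

	/* ... use the key under the usual locking rules ... */
	key_ref_put(kref);
}
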
diff --git a/security/keys/proc.c b/security/keys/proc.c
index c55cf1fd0826..12b750e51fbf 100644
--- a/security/keys/proc.c
+++ b/security/keys/proc.c
@@ -167,7 +167,7 @@ static int proc_keys_show(struct seq_file *m, void *v)
167#define showflag(KEY, LETTER, FLAG) \ 167#define showflag(KEY, LETTER, FLAG) \
168 (test_bit(FLAG, &(KEY)->flags) ? LETTER : '-') 168 (test_bit(FLAG, &(KEY)->flags) ? LETTER : '-')
169 169
170 seq_printf(m, "%08x %c%c%c%c%c%c %5d %4s %06x %5d %5d %-9.9s ", 170 seq_printf(m, "%08x %c%c%c%c%c%c %5d %4s %08x %5d %5d %-9.9s ",
171 key->serial, 171 key->serial,
172 showflag(key, 'I', KEY_FLAG_INSTANTIATED), 172 showflag(key, 'I', KEY_FLAG_INSTANTIATED),
173 showflag(key, 'R', KEY_FLAG_REVOKED), 173 showflag(key, 'R', KEY_FLAG_REVOKED),
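
Note: the proc.c change widens the printed permissions mask from six to eight hex digits because the possessor bits added by this series sit above the user/group/other bytes. Callers that allocate keys now pass possessor bits explicitly, as the keyring.c, process_keys.c and request_key*.c hunks do; a hedged example of such an allocation, with key_type_user and the description chosen only for illustration:

/* Sketch: granting possessor rights at allocation time.  The combined
 * mask no longer fits in six hex digits, hence the %08x above. */
struct key *key;

key = key_alloc(&key_type_user, "example:desc",
		current->fsuid, current->fsgid,
		KEY_POS_ALL | KEY_USR_VIEW | KEY_USR_READ, 0);
if (IS_ERR(key))
	return PTR_ERR(key);
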
diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c
index c089f78fb94e..d42d2158ce13 100644
--- a/security/keys/process_keys.c
+++ b/security/keys/process_keys.c
@@ -39,7 +39,7 @@ struct key root_user_keyring = {
39 .type = &key_type_keyring, 39 .type = &key_type_keyring,
40 .user = &root_key_user, 40 .user = &root_key_user,
41 .sem = __RWSEM_INITIALIZER(root_user_keyring.sem), 41 .sem = __RWSEM_INITIALIZER(root_user_keyring.sem),
42 .perm = KEY_USR_ALL, 42 .perm = KEY_POS_ALL | KEY_USR_ALL,
43 .flags = 1 << KEY_FLAG_INSTANTIATED, 43 .flags = 1 << KEY_FLAG_INSTANTIATED,
44 .description = "_uid.0", 44 .description = "_uid.0",
45#ifdef KEY_DEBUGGING 45#ifdef KEY_DEBUGGING
@@ -54,7 +54,7 @@ struct key root_session_keyring = {
54 .type = &key_type_keyring, 54 .type = &key_type_keyring,
55 .user = &root_key_user, 55 .user = &root_key_user,
56 .sem = __RWSEM_INITIALIZER(root_session_keyring.sem), 56 .sem = __RWSEM_INITIALIZER(root_session_keyring.sem),
57 .perm = KEY_USR_ALL, 57 .perm = KEY_POS_ALL | KEY_USR_ALL,
58 .flags = 1 << KEY_FLAG_INSTANTIATED, 58 .flags = 1 << KEY_FLAG_INSTANTIATED,
59 .description = "_uid_ses.0", 59 .description = "_uid_ses.0",
60#ifdef KEY_DEBUGGING 60#ifdef KEY_DEBUGGING
@@ -98,7 +98,7 @@ int alloc_uid_keyring(struct user_struct *user)
98 user->session_keyring = session_keyring; 98 user->session_keyring = session_keyring;
99 ret = 0; 99 ret = 0;
100 100
101 error: 101error:
102 return ret; 102 return ret;
103 103
104} /* end alloc_uid_keyring() */ 104} /* end alloc_uid_keyring() */
@@ -156,7 +156,7 @@ int install_thread_keyring(struct task_struct *tsk)
156 ret = 0; 156 ret = 0;
157 157
158 key_put(old); 158 key_put(old);
159 error: 159error:
160 return ret; 160 return ret;
161 161
162} /* end install_thread_keyring() */ 162} /* end install_thread_keyring() */
@@ -193,7 +193,7 @@ int install_process_keyring(struct task_struct *tsk)
193 } 193 }
194 194
195 ret = 0; 195 ret = 0;
196 error: 196error:
197 return ret; 197 return ret;
198 198
199} /* end install_process_keyring() */ 199} /* end install_process_keyring() */
@@ -236,7 +236,7 @@ static int install_session_keyring(struct task_struct *tsk,
236 /* we're using RCU on the pointer */ 236 /* we're using RCU on the pointer */
237 synchronize_rcu(); 237 synchronize_rcu();
238 key_put(old); 238 key_put(old);
239 error: 239error:
240 return ret; 240 return ret;
241 241
242} /* end install_session_keyring() */ 242} /* end install_session_keyring() */
@@ -376,13 +376,13 @@ void key_fsgid_changed(struct task_struct *tsk)
376 * - we return -EAGAIN if we didn't find any matching key 376 * - we return -EAGAIN if we didn't find any matching key
377 * - we return -ENOKEY if we found only negative matching keys 377 * - we return -ENOKEY if we found only negative matching keys
378 */ 378 */
379struct key *search_process_keyrings(struct key_type *type, 379key_ref_t search_process_keyrings(struct key_type *type,
380 const void *description, 380 const void *description,
381 key_match_func_t match, 381 key_match_func_t match,
382 struct task_struct *context) 382 struct task_struct *context)
383{ 383{
384 struct request_key_auth *rka; 384 struct request_key_auth *rka;
385 struct key *key, *ret, *err, *instkey; 385 key_ref_t key_ref, ret, err, instkey_ref;
386 386
387 /* we want to return -EAGAIN or -ENOKEY if any of the keyrings were 387 /* we want to return -EAGAIN or -ENOKEY if any of the keyrings were
388 * searchable, but we failed to find a key or we found a negative key; 388 * searchable, but we failed to find a key or we found a negative key;
@@ -391,46 +391,48 @@ struct key *search_process_keyrings(struct key_type *type,
391 * 391 *
392 * in terms of priority: success > -ENOKEY > -EAGAIN > other error 392 * in terms of priority: success > -ENOKEY > -EAGAIN > other error
393 */ 393 */
394 key = NULL; 394 key_ref = NULL;
395 ret = NULL; 395 ret = NULL;
396 err = ERR_PTR(-EAGAIN); 396 err = ERR_PTR(-EAGAIN);
397 397
398 /* search the thread keyring first */ 398 /* search the thread keyring first */
399 if (context->thread_keyring) { 399 if (context->thread_keyring) {
400 key = keyring_search_aux(context->thread_keyring, 400 key_ref = keyring_search_aux(
401 context, type, description, match); 401 make_key_ref(context->thread_keyring, 1),
402 if (!IS_ERR(key)) 402 context, type, description, match);
403 if (!IS_ERR(key_ref))
403 goto found; 404 goto found;
404 405
405 switch (PTR_ERR(key)) { 406 switch (PTR_ERR(key_ref)) {
406 case -EAGAIN: /* no key */ 407 case -EAGAIN: /* no key */
407 if (ret) 408 if (ret)
408 break; 409 break;
409 case -ENOKEY: /* negative key */ 410 case -ENOKEY: /* negative key */
410 ret = key; 411 ret = key_ref;
411 break; 412 break;
412 default: 413 default:
413 err = key; 414 err = key_ref;
414 break; 415 break;
415 } 416 }
416 } 417 }
417 418
418 /* search the process keyring second */ 419 /* search the process keyring second */
419 if (context->signal->process_keyring) { 420 if (context->signal->process_keyring) {
420 key = keyring_search_aux(context->signal->process_keyring, 421 key_ref = keyring_search_aux(
421 context, type, description, match); 422 make_key_ref(context->signal->process_keyring, 1),
422 if (!IS_ERR(key)) 423 context, type, description, match);
424 if (!IS_ERR(key_ref))
423 goto found; 425 goto found;
424 426
425 switch (PTR_ERR(key)) { 427 switch (PTR_ERR(key_ref)) {
426 case -EAGAIN: /* no key */ 428 case -EAGAIN: /* no key */
427 if (ret) 429 if (ret)
428 break; 430 break;
429 case -ENOKEY: /* negative key */ 431 case -ENOKEY: /* negative key */
430 ret = key; 432 ret = key_ref;
431 break; 433 break;
432 default: 434 default:
433 err = key; 435 err = key_ref;
434 break; 436 break;
435 } 437 }
436 } 438 }
@@ -438,23 +440,25 @@ struct key *search_process_keyrings(struct key_type *type,
438 /* search the session keyring */ 440 /* search the session keyring */
439 if (context->signal->session_keyring) { 441 if (context->signal->session_keyring) {
440 rcu_read_lock(); 442 rcu_read_lock();
441 key = keyring_search_aux( 443 key_ref = keyring_search_aux(
442 rcu_dereference(context->signal->session_keyring), 444 make_key_ref(rcu_dereference(
445 context->signal->session_keyring),
446 1),
443 context, type, description, match); 447 context, type, description, match);
444 rcu_read_unlock(); 448 rcu_read_unlock();
445 449
446 if (!IS_ERR(key)) 450 if (!IS_ERR(key_ref))
447 goto found; 451 goto found;
448 452
449 switch (PTR_ERR(key)) { 453 switch (PTR_ERR(key_ref)) {
450 case -EAGAIN: /* no key */ 454 case -EAGAIN: /* no key */
451 if (ret) 455 if (ret)
452 break; 456 break;
453 case -ENOKEY: /* negative key */ 457 case -ENOKEY: /* negative key */
454 ret = key; 458 ret = key_ref;
455 break; 459 break;
456 default: 460 default:
457 err = key; 461 err = key_ref;
458 break; 462 break;
459 } 463 }
460 464
@@ -465,51 +469,54 @@ struct key *search_process_keyrings(struct key_type *type,
465 goto no_key; 469 goto no_key;
466 470
467 rcu_read_lock(); 471 rcu_read_lock();
468 instkey = __keyring_search_one( 472 instkey_ref = __keyring_search_one(
469 rcu_dereference(context->signal->session_keyring), 473 make_key_ref(rcu_dereference(
474 context->signal->session_keyring),
475 1),
470 &key_type_request_key_auth, NULL, 0); 476 &key_type_request_key_auth, NULL, 0);
471 rcu_read_unlock(); 477 rcu_read_unlock();
472 478
473 if (IS_ERR(instkey)) 479 if (IS_ERR(instkey_ref))
474 goto no_key; 480 goto no_key;
475 481
476 rka = instkey->payload.data; 482 rka = key_ref_to_ptr(instkey_ref)->payload.data;
477 483
478 key = search_process_keyrings(type, description, match, 484 key_ref = search_process_keyrings(type, description, match,
479 rka->context); 485 rka->context);
480 key_put(instkey); 486 key_ref_put(instkey_ref);
481 487
482 if (!IS_ERR(key)) 488 if (!IS_ERR(key_ref))
483 goto found; 489 goto found;
484 490
485 switch (PTR_ERR(key)) { 491 switch (PTR_ERR(key_ref)) {
486 case -EAGAIN: /* no key */ 492 case -EAGAIN: /* no key */
487 if (ret) 493 if (ret)
488 break; 494 break;
489 case -ENOKEY: /* negative key */ 495 case -ENOKEY: /* negative key */
490 ret = key; 496 ret = key_ref;
491 break; 497 break;
492 default: 498 default:
493 err = key; 499 err = key_ref;
494 break; 500 break;
495 } 501 }
496 } 502 }
497 /* or search the user-session keyring */ 503 /* or search the user-session keyring */
498 else { 504 else {
499 key = keyring_search_aux(context->user->session_keyring, 505 key_ref = keyring_search_aux(
500 context, type, description, match); 506 make_key_ref(context->user->session_keyring, 1),
501 if (!IS_ERR(key)) 507 context, type, description, match);
508 if (!IS_ERR(key_ref))
502 goto found; 509 goto found;
503 510
504 switch (PTR_ERR(key)) { 511 switch (PTR_ERR(key_ref)) {
505 case -EAGAIN: /* no key */ 512 case -EAGAIN: /* no key */
506 if (ret) 513 if (ret)
507 break; 514 break;
508 case -ENOKEY: /* negative key */ 515 case -ENOKEY: /* negative key */
509 ret = key; 516 ret = key_ref;
510 break; 517 break;
511 default: 518 default:
512 err = key; 519 err = key_ref;
513 break; 520 break;
514 } 521 }
515 } 522 }
@@ -517,29 +524,40 @@ struct key *search_process_keyrings(struct key_type *type,
517 524
518no_key: 525no_key:
519 /* no key - decide on the error we're going to go for */ 526 /* no key - decide on the error we're going to go for */
520 key = ret ? ret : err; 527 key_ref = ret ? ret : err;
521 528
522found: 529found:
523 return key; 530 return key_ref;
524 531
525} /* end search_process_keyrings() */ 532} /* end search_process_keyrings() */
526 533
527/*****************************************************************************/ 534/*****************************************************************************/
528/* 535/*
536 * see if the key we're looking at is the target key
537 */
538static int lookup_user_key_possessed(const struct key *key, const void *target)
539{
540 return key == target;
541
542} /* end lookup_user_key_possessed() */
543
544/*****************************************************************************/
545/*
529 * lookup a key given a key ID from userspace with a given permissions mask 546 * lookup a key given a key ID from userspace with a given permissions mask
530 * - don't create special keyrings unless so requested 547 * - don't create special keyrings unless so requested
531 * - partially constructed keys aren't found unless requested 548 * - partially constructed keys aren't found unless requested
532 */ 549 */
533struct key *lookup_user_key(struct task_struct *context, key_serial_t id, 550key_ref_t lookup_user_key(struct task_struct *context, key_serial_t id,
534 int create, int partial, key_perm_t perm) 551 int create, int partial, key_perm_t perm)
535{ 552{
553 key_ref_t key_ref, skey_ref;
536 struct key *key; 554 struct key *key;
537 int ret; 555 int ret;
538 556
539 if (!context) 557 if (!context)
540 context = current; 558 context = current;
541 559
542 key = ERR_PTR(-ENOKEY); 560 key_ref = ERR_PTR(-ENOKEY);
543 561
544 switch (id) { 562 switch (id) {
545 case KEY_SPEC_THREAD_KEYRING: 563 case KEY_SPEC_THREAD_KEYRING:
@@ -556,6 +574,7 @@ struct key *lookup_user_key(struct task_struct *context, key_serial_t id,
556 574
557 key = context->thread_keyring; 575 key = context->thread_keyring;
558 atomic_inc(&key->usage); 576 atomic_inc(&key->usage);
577 key_ref = make_key_ref(key, 1);
559 break; 578 break;
560 579
561 case KEY_SPEC_PROCESS_KEYRING: 580 case KEY_SPEC_PROCESS_KEYRING:
@@ -572,6 +591,7 @@ struct key *lookup_user_key(struct task_struct *context, key_serial_t id,
572 591
573 key = context->signal->process_keyring; 592 key = context->signal->process_keyring;
574 atomic_inc(&key->usage); 593 atomic_inc(&key->usage);
594 key_ref = make_key_ref(key, 1);
575 break; 595 break;
576 596
577 case KEY_SPEC_SESSION_KEYRING: 597 case KEY_SPEC_SESSION_KEYRING:
@@ -579,7 +599,7 @@ struct key *lookup_user_key(struct task_struct *context, key_serial_t id,
579 /* always install a session keyring upon access if one 599 /* always install a session keyring upon access if one
580 * doesn't exist yet */ 600 * doesn't exist yet */
581 ret = install_session_keyring( 601 ret = install_session_keyring(
582 context, context->user->session_keyring); 602 context, context->user->session_keyring);
583 if (ret < 0) 603 if (ret < 0)
584 goto error; 604 goto error;
585 } 605 }
@@ -588,16 +608,19 @@ struct key *lookup_user_key(struct task_struct *context, key_serial_t id,
588 key = rcu_dereference(context->signal->session_keyring); 608 key = rcu_dereference(context->signal->session_keyring);
589 atomic_inc(&key->usage); 609 atomic_inc(&key->usage);
590 rcu_read_unlock(); 610 rcu_read_unlock();
611 key_ref = make_key_ref(key, 1);
591 break; 612 break;
592 613
593 case KEY_SPEC_USER_KEYRING: 614 case KEY_SPEC_USER_KEYRING:
594 key = context->user->uid_keyring; 615 key = context->user->uid_keyring;
595 atomic_inc(&key->usage); 616 atomic_inc(&key->usage);
617 key_ref = make_key_ref(key, 1);
596 break; 618 break;
597 619
598 case KEY_SPEC_USER_SESSION_KEYRING: 620 case KEY_SPEC_USER_SESSION_KEYRING:
599 key = context->user->session_keyring; 621 key = context->user->session_keyring;
600 atomic_inc(&key->usage); 622 atomic_inc(&key->usage);
623 key_ref = make_key_ref(key, 1);
601 break; 624 break;
602 625
603 case KEY_SPEC_GROUP_KEYRING: 626 case KEY_SPEC_GROUP_KEYRING:
@@ -606,13 +629,28 @@ struct key *lookup_user_key(struct task_struct *context, key_serial_t id,
606 goto error; 629 goto error;
607 630
608 default: 631 default:
609 key = ERR_PTR(-EINVAL); 632 key_ref = ERR_PTR(-EINVAL);
610 if (id < 1) 633 if (id < 1)
611 goto error; 634 goto error;
612 635
613 key = key_lookup(id); 636 key = key_lookup(id);
614 if (IS_ERR(key)) 637 if (IS_ERR(key)) {
638 key_ref = ERR_PTR(PTR_ERR(key));
615 goto error; 639 goto error;
640 }
641
642 key_ref = make_key_ref(key, 0);
643
644 /* check to see if we possess the key */
645 skey_ref = search_process_keyrings(key->type, key,
646 lookup_user_key_possessed,
647 current);
648
649 if (!IS_ERR(skey_ref)) {
650 key_put(key);
651 key_ref = skey_ref;
652 }
653
616 break; 654 break;
617 } 655 }
618 656
@@ -630,15 +668,15 @@ struct key *lookup_user_key(struct task_struct *context, key_serial_t id,
630 /* check the permissions */ 668 /* check the permissions */
631 ret = -EACCES; 669 ret = -EACCES;
632 670
633 if (!key_task_permission(key, context, perm)) 671 if (!key_task_permission(key_ref, context, perm))
634 goto invalid_key; 672 goto invalid_key;
635 673
636 error: 674error:
637 return key; 675 return key_ref;
638 676
639 invalid_key: 677invalid_key:
640 key_put(key); 678 key_ref_put(key_ref);
641 key = ERR_PTR(ret); 679 key_ref = ERR_PTR(ret);
642 goto error; 680 goto error;
643 681
644} /* end lookup_user_key() */ 682} /* end lookup_user_key() */
@@ -694,9 +732,9 @@ long join_session_keyring(const char *name)
694 ret = keyring->serial; 732 ret = keyring->serial;
695 key_put(keyring); 733 key_put(keyring);
696 734
697 error2: 735error2:
698 up(&key_session_sem); 736 up(&key_session_sem);
699 error: 737error:
700 return ret; 738 return ret;
701 739
702} /* end join_session_keyring() */ 740} /* end join_session_keyring() */
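
Note: in process_keys.c, the task's own keyrings are returned from lookup_user_key() with the possession bit set, whereas a key named by serial number starts out unpossessed and is upgraded only if search_process_keyrings() can reach it from the caller's keyrings. A condensed restatement of that decision, simplified from the hunks above:

/* Condensed sketch of possession marking in lookup_user_key().
 * Simplified from the diff; not the complete function. */
key_ref = make_key_ref(context->thread_keyring, 1);	/* special keyring IDs: always ours */

key = key_lookup(id);					/* arbitrary serial number */
key_ref = make_key_ref(key, 0);				/* not possessed yet */
skey_ref = search_process_keyrings(key->type, key,
				   lookup_user_key_possessed, current);
if (!IS_ERR(skey_ref)) {
	key_put(key);
	key_ref = skey_ref;				/* reachable from our keyrings: possessed */
}
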
diff --git a/security/keys/request_key.c b/security/keys/request_key.c
index 90c1506d007c..e6dd366d43a3 100644
--- a/security/keys/request_key.c
+++ b/security/keys/request_key.c
@@ -129,7 +129,7 @@ static struct key *__request_key_construction(struct key_type *type,
129 129
130 /* create a key and add it to the queue */ 130 /* create a key and add it to the queue */
131 key = key_alloc(type, description, 131 key = key_alloc(type, description,
132 current->fsuid, current->fsgid, KEY_USR_ALL, 0); 132 current->fsuid, current->fsgid, KEY_POS_ALL, 0);
133 if (IS_ERR(key)) 133 if (IS_ERR(key))
134 goto alloc_failed; 134 goto alloc_failed;
135 135
@@ -365,14 +365,24 @@ struct key *request_key_and_link(struct key_type *type,
365{ 365{
366 struct key_user *user; 366 struct key_user *user;
367 struct key *key; 367 struct key *key;
368 key_ref_t key_ref;
368 369
369 kenter("%s,%s,%s,%p", 370 kenter("%s,%s,%s,%p",
370 type->name, description, callout_info, dest_keyring); 371 type->name, description, callout_info, dest_keyring);
371 372
372 /* search all the process keyrings for a key */ 373 /* search all the process keyrings for a key */
373 key = search_process_keyrings(type, description, type->match, current); 374 key_ref = search_process_keyrings(type, description, type->match,
375 current);
374 376
375 if (PTR_ERR(key) == -EAGAIN) { 377 kdebug("search 1: %p", key_ref);
378
379 if (!IS_ERR(key_ref)) {
380 key = key_ref_to_ptr(key_ref);
381 }
382 else if (PTR_ERR(key_ref) != -EAGAIN) {
383 key = ERR_PTR(PTR_ERR(key_ref));
384 }
385 else {
376 /* the search failed, but the keyrings were searchable, so we 386 /* the search failed, but the keyrings were searchable, so we
377 * should consult userspace if we can */ 387 * should consult userspace if we can */
378 key = ERR_PTR(-ENOKEY); 388 key = ERR_PTR(-ENOKEY);
@@ -384,7 +394,7 @@ struct key *request_key_and_link(struct key_type *type,
384 if (!user) 394 if (!user)
385 goto nomem; 395 goto nomem;
386 396
387 do { 397 for (;;) {
388 if (signal_pending(current)) 398 if (signal_pending(current))
389 goto interrupted; 399 goto interrupted;
390 400
@@ -397,10 +407,22 @@ struct key *request_key_and_link(struct key_type *type,
397 407
398 /* someone else made the key we want, so we need to 408 /* someone else made the key we want, so we need to
399 * search again as it might now be available to us */ 409 * search again as it might now be available to us */
400 key = search_process_keyrings(type, description, 410 key_ref = search_process_keyrings(type, description,
401 type->match, current); 411 type->match,
412 current);
413
414 kdebug("search 2: %p", key_ref);
402 415
403 } while (PTR_ERR(key) == -EAGAIN); 416 if (!IS_ERR(key_ref)) {
417 key = key_ref_to_ptr(key_ref);
418 break;
419 }
420
421 if (PTR_ERR(key_ref) != -EAGAIN) {
422 key = ERR_PTR(PTR_ERR(key_ref));
423 break;
424 }
425 }
404 426
405 key_user_put(user); 427 key_user_put(user);
406 428
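
Note: request_key_and_link() now dispatches explicitly on the result of search_process_keyrings(): success unwraps the reference, any error other than -EAGAIN is returned as-is, and only -EAGAIN (the keyrings were searchable but held no key) falls through to upcalling userspace. A condensed sketch of that control flow; construct_or_wait() is a hypothetical stand-in for the construction/wait path shown in the diff.

/* Sketch of the three-way dispatch; construct_or_wait() is a
 * hypothetical placeholder for the slow path above. */
key_ref = search_process_keyrings(type, description, type->match, current);
if (!IS_ERR(key_ref))
	key = key_ref_to_ptr(key_ref);
else if (PTR_ERR(key_ref) != -EAGAIN)
	key = ERR_PTR(PTR_ERR(key_ref));
else
	key = construct_or_wait();
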
diff --git a/security/keys/request_key_auth.c b/security/keys/request_key_auth.c
index f22264632229..1ecd3d3fa9f8 100644
--- a/security/keys/request_key_auth.c
+++ b/security/keys/request_key_auth.c
@@ -126,7 +126,7 @@ struct key *request_key_auth_new(struct key *target, struct key **_rkakey)
126 126
127 rkakey = key_alloc(&key_type_request_key_auth, desc, 127 rkakey = key_alloc(&key_type_request_key_auth, desc,
128 current->fsuid, current->fsgid, 128 current->fsuid, current->fsgid,
129 KEY_USR_VIEW, 1); 129 KEY_POS_VIEW | KEY_USR_VIEW, 1);
130 if (IS_ERR(rkakey)) { 130 if (IS_ERR(rkakey)) {
131 key_put(keyring); 131 key_put(keyring);
132 kleave("= %ld", PTR_ERR(rkakey)); 132 kleave("= %ld", PTR_ERR(rkakey));
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 6e4937fe062b..b13be15165f5 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -630,6 +630,16 @@ static inline u16 inode_mode_to_security_class(umode_t mode)
630 return SECCLASS_FILE; 630 return SECCLASS_FILE;
631} 631}
632 632
633static inline int default_protocol_stream(int protocol)
634{
635 return (protocol == IPPROTO_IP || protocol == IPPROTO_TCP);
636}
637
638static inline int default_protocol_dgram(int protocol)
639{
640 return (protocol == IPPROTO_IP || protocol == IPPROTO_UDP);
641}
642
633static inline u16 socket_type_to_security_class(int family, int type, int protocol) 643static inline u16 socket_type_to_security_class(int family, int type, int protocol)
634{ 644{
635 switch (family) { 645 switch (family) {
@@ -646,10 +656,16 @@ static inline u16 socket_type_to_security_class(int family, int type, int protoc
646 case PF_INET6: 656 case PF_INET6:
647 switch (type) { 657 switch (type) {
648 case SOCK_STREAM: 658 case SOCK_STREAM:
649 return SECCLASS_TCP_SOCKET; 659 if (default_protocol_stream(protocol))
660 return SECCLASS_TCP_SOCKET;
661 else
662 return SECCLASS_RAWIP_SOCKET;
650 case SOCK_DGRAM: 663 case SOCK_DGRAM:
651 return SECCLASS_UDP_SOCKET; 664 if (default_protocol_dgram(protocol))
652 case SOCK_RAW: 665 return SECCLASS_UDP_SOCKET;
666 else
667 return SECCLASS_RAWIP_SOCKET;
668 default:
653 return SECCLASS_RAWIP_SOCKET; 669 return SECCLASS_RAWIP_SOCKET;
654 } 670 }
655 break; 671 break;
@@ -2970,6 +2986,8 @@ static int selinux_socket_bind(struct socket *sock, struct sockaddr *address, in
2970 2986
2971 /* 2987 /*
2972 * If PF_INET or PF_INET6, check name_bind permission for the port. 2988 * If PF_INET or PF_INET6, check name_bind permission for the port.
2989 * Multiple address binding for SCTP is not supported yet: we just
2990 * check the first address now.
2973 */ 2991 */
2974 family = sock->sk->sk_family; 2992 family = sock->sk->sk_family;
2975 if (family == PF_INET || family == PF_INET6) { 2993 if (family == PF_INET || family == PF_INET6) {
@@ -3014,12 +3032,12 @@ static int selinux_socket_bind(struct socket *sock, struct sockaddr *address, in
3014 goto out; 3032 goto out;
3015 } 3033 }
3016 3034
3017 switch(sk->sk_protocol) { 3035 switch(isec->sclass) {
3018 case IPPROTO_TCP: 3036 case SECCLASS_TCP_SOCKET:
3019 node_perm = TCP_SOCKET__NODE_BIND; 3037 node_perm = TCP_SOCKET__NODE_BIND;
3020 break; 3038 break;
3021 3039
3022 case IPPROTO_UDP: 3040 case SECCLASS_UDP_SOCKET:
3023 node_perm = UDP_SOCKET__NODE_BIND; 3041 node_perm = UDP_SOCKET__NODE_BIND;
3024 break; 3042 break;
3025 3043
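
Note: the hooks.c change ties the SELinux socket class to the protocol as well as the type: only the default protocols keep the tcp_socket/udp_socket classes, anything else on an inet stream or datagram socket is classed as rawip_socket, and the bind-time node_bind check now keys off isec->sclass rather than sk->sk_protocol. Illustrative mappings under the new logic (SCTP chosen only as an example of a non-default protocol):

/* Sketch of the resulting classification; comments give the class
 * returned by socket_type_to_security_class() after this change. */
socket_type_to_security_class(PF_INET, SOCK_STREAM, IPPROTO_TCP);  /* SECCLASS_TCP_SOCKET   */
socket_type_to_security_class(PF_INET, SOCK_DGRAM,  IPPROTO_IP);   /* SECCLASS_UDP_SOCKET   */
socket_type_to_security_class(PF_INET, SOCK_STREAM, IPPROTO_SCTP); /* SECCLASS_RAWIP_SOCKET */
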
diff --git a/sound/oss/au1000.c b/sound/oss/au1000.c
index 4491733c9e4e..2c2ae2ee01ac 100644
--- a/sound/oss/au1000.c
+++ b/sound/oss/au1000.c
@@ -1295,7 +1295,7 @@ static int au1000_mmap(struct file *file, struct vm_area_struct *vma)
1295 unsigned long size; 1295 unsigned long size;
1296 int ret = 0; 1296 int ret = 0;
1297 1297
1298 dbg(__FUNCTION__); 1298 dbg("%s", __FUNCTION__);
1299 1299
1300 lock_kernel(); 1300 lock_kernel();
1301 down(&s->sem); 1301 down(&s->sem);
diff --git a/sound/oss/ite8172.c b/sound/oss/ite8172.c
index 58f879fda975..26e5944b6ba8 100644
--- a/sound/oss/ite8172.c
+++ b/sound/oss/ite8172.c
@@ -1859,7 +1859,7 @@ static int it8172_release(struct inode *inode, struct file *file)
1859 struct it8172_state *s = (struct it8172_state *)file->private_data; 1859 struct it8172_state *s = (struct it8172_state *)file->private_data;
1860 1860
1861#ifdef IT8172_VERBOSE_DEBUG 1861#ifdef IT8172_VERBOSE_DEBUG
1862 dbg(__FUNCTION__); 1862 dbg("%s", __FUNCTION__);
1863#endif 1863#endif
1864 lock_kernel(); 1864 lock_kernel();
1865 if (file->f_mode & FMODE_WRITE) 1865 if (file->f_mode & FMODE_WRITE)
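
Note: the two OSS driver hunks change dbg(__FUNCTION__) to dbg("%s", __FUNCTION__). Since __FUNCTION__ is not a string literal, passing it directly as the format string draws format-security warnings and is unsafe as a general pattern (any '%' in a non-literal format would be interpreted as a conversion); the explicit "%s" form avoids both. A minimal illustration, assuming a printf-style dbg() macro:

/* Illustrative only: never pass a non-literal as the format string. */
dbg(label);         /* fragile: '%' in label would be parsed as a conversion */
dbg("%s", label);   /* safe */
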
diff --git a/sound/pci/atiixp_modem.c b/sound/pci/atiixp_modem.c
index 8a59598167f9..c1a239a4dac6 100644
--- a/sound/pci/atiixp_modem.c
+++ b/sound/pci/atiixp_modem.c
@@ -405,7 +405,7 @@ static int snd_atiixp_acquire_codec(atiixp_t *chip)
405 405
406 while (atiixp_read(chip, PHYS_OUT_ADDR) & ATI_REG_PHYS_OUT_ADDR_EN) { 406 while (atiixp_read(chip, PHYS_OUT_ADDR) & ATI_REG_PHYS_OUT_ADDR_EN) {
407 if (! timeout--) { 407 if (! timeout--) {
408 snd_printk(KERN_WARNING "atiixp: codec acquire timeout\n"); 408 snd_printk(KERN_WARNING "atiixp-modem: codec acquire timeout\n");
409 return -EBUSY; 409 return -EBUSY;
410 } 410 }
411 udelay(1); 411 udelay(1);
@@ -436,7 +436,7 @@ static unsigned short snd_atiixp_codec_read(atiixp_t *chip, unsigned short codec
436 } while (--timeout); 436 } while (--timeout);
437 /* time out may happen during reset */ 437 /* time out may happen during reset */
438 if (reg < 0x7c) 438 if (reg < 0x7c)
439 snd_printk(KERN_WARNING "atiixp: codec read timeout (reg %x)\n", reg); 439 snd_printk(KERN_WARNING "atiixp-modem: codec read timeout (reg %x)\n", reg);
440 return 0xffff; 440 return 0xffff;
441} 441}
442 442
@@ -498,7 +498,7 @@ static int snd_atiixp_aclink_reset(atiixp_t *chip)
498 do_delay(); 498 do_delay();
499 atiixp_update(chip, CMD, ATI_REG_CMD_AC_RESET, ATI_REG_CMD_AC_RESET); 499 atiixp_update(chip, CMD, ATI_REG_CMD_AC_RESET, ATI_REG_CMD_AC_RESET);
500 if (--timeout) { 500 if (--timeout) {
501 snd_printk(KERN_ERR "atiixp: codec reset timeout\n"); 501 snd_printk(KERN_ERR "atiixp-modem: codec reset timeout\n");
502 break; 502 break;
503 } 503 }
504 } 504 }
@@ -552,7 +552,7 @@ static int snd_atiixp_codec_detect(atiixp_t *chip)
552 atiixp_write(chip, IER, 0); /* disable irqs */ 552 atiixp_write(chip, IER, 0); /* disable irqs */
553 553
554 if ((chip->codec_not_ready_bits & ALL_CODEC_NOT_READY) == ALL_CODEC_NOT_READY) { 554 if ((chip->codec_not_ready_bits & ALL_CODEC_NOT_READY) == ALL_CODEC_NOT_READY) {
555 snd_printk(KERN_ERR "atiixp: no codec detected!\n"); 555 snd_printk(KERN_ERR "atiixp-modem: no codec detected!\n");
556 return -ENXIO; 556 return -ENXIO;
557 } 557 }
558 return 0; 558 return 0;
@@ -635,7 +635,7 @@ static void snd_atiixp_xrun_dma(atiixp_t *chip, atiixp_dma_t *dma)
635{ 635{
636 if (! dma->substream || ! dma->running) 636 if (! dma->substream || ! dma->running)
637 return; 637 return;
638 snd_printdd("atiixp: XRUN detected (DMA %d)\n", dma->ops->type); 638 snd_printdd("atiixp-modem: XRUN detected (DMA %d)\n", dma->ops->type);
639 snd_pcm_stop(dma->substream, SNDRV_PCM_STATE_XRUN); 639 snd_pcm_stop(dma->substream, SNDRV_PCM_STATE_XRUN);
640} 640}
641 641
@@ -1081,14 +1081,14 @@ static int __devinit snd_atiixp_mixer_new(atiixp_t *chip, int clock)
1081 ac97.scaps = AC97_SCAP_SKIP_AUDIO; 1081 ac97.scaps = AC97_SCAP_SKIP_AUDIO;
1082 if ((err = snd_ac97_mixer(pbus, &ac97, &chip->ac97[i])) < 0) { 1082 if ((err = snd_ac97_mixer(pbus, &ac97, &chip->ac97[i])) < 0) {
1083 chip->ac97[i] = NULL; /* to be sure */ 1083 chip->ac97[i] = NULL; /* to be sure */
1084 snd_printdd("atiixp: codec %d not available for modem\n", i); 1084 snd_printdd("atiixp-modem: codec %d not available for modem\n", i);
1085 continue; 1085 continue;
1086 } 1086 }
1087 codec_count++; 1087 codec_count++;
1088 } 1088 }
1089 1089
1090 if (! codec_count) { 1090 if (! codec_count) {
1091 snd_printk(KERN_ERR "atiixp: no codec available\n"); 1091 snd_printk(KERN_ERR "atiixp-modem: no codec available\n");
1092 return -ENODEV; 1092 return -ENODEV;
1093 } 1093 }
1094 1094
@@ -1159,7 +1159,7 @@ static void __devinit snd_atiixp_proc_init(atiixp_t *chip)
1159{ 1159{
1160 snd_info_entry_t *entry; 1160 snd_info_entry_t *entry;
1161 1161
1162 if (! snd_card_proc_new(chip->card, "atiixp", &entry)) 1162 if (! snd_card_proc_new(chip->card, "atiixp-modem", &entry))
1163 snd_info_set_text_ops(entry, chip, 1024, snd_atiixp_proc_read); 1163 snd_info_set_text_ops(entry, chip, 1024, snd_atiixp_proc_read);
1164} 1164}
1165 1165