author    Paul Mackerras <paulus@samba.org>    2006-08-31 01:45:48 -0400
committer Paul Mackerras <paulus@samba.org>    2006-08-31 01:45:48 -0400
commit    aa43f77939c97bf9d3580c6a5e71a5a40290e451 (patch)
tree      095c0b8b3da4b6554a3f8ef4b39240a5d9216d4d
parent    2818c5dec5e28d65d52afbb7695bbbafe6377ee5 (diff)
parent    4c15343167b5febe7bb0ba96aad5bef42ae94d3b (diff)
Merge branch 'merge'
-rw-r--r--CREDITS2
-rw-r--r--Documentation/connector/ucon.c206
-rw-r--r--Documentation/cpusets.txt6
-rw-r--r--Documentation/fb/imacfb.txt31
-rw-r--r--Documentation/filesystems/00-INDEX4
-rw-r--r--Documentation/filesystems/relay.txt479
-rw-r--r--Documentation/filesystems/relayfs.txt442
-rw-r--r--Documentation/input/joystick.txt1
-rw-r--r--Documentation/kernel-parameters.txt2
-rw-r--r--Documentation/kobject.txt2
-rw-r--r--Documentation/networking/ip-sysctl.txt6
-rw-r--r--Documentation/powerpc/booting-without-of.txt6
-rw-r--r--Documentation/scsi/ChangeLog.megaraid123
-rw-r--r--Documentation/sysctl/fs.txt20
-rw-r--r--Documentation/sysctl/kernel.txt20
-rw-r--r--MAINTAINERS17
-rw-r--r--Makefile34
-rw-r--r--arch/arm/Makefile3
-rw-r--r--arch/arm/common/dmabounce.c8
-rw-r--r--arch/arm/common/rtctime.c1
-rw-r--r--arch/arm/common/sa1111.c6
-rw-r--r--arch/arm/kernel/Makefile3
-rw-r--r--arch/arm/kernel/entry-armv.S21
-rw-r--r--arch/arm/kernel/head.S2
-rw-r--r--arch/arm/kernel/isa.c63
-rw-r--r--arch/arm/kernel/traps.c2
-rw-r--r--arch/arm/mach-footbridge/dc21285.c1
-rw-r--r--arch/arm/mach-integrator/pci_v3.c2
-rw-r--r--arch/arm/mach-ixp4xx/common-pci.c2
-rw-r--r--arch/arm/mach-ixp4xx/gtwx5715-setup.c7
-rw-r--r--arch/arm/mach-pxa/corgi_ssp.c20
-rw-r--r--arch/arm/mach-pxa/ssp.c35
-rw-r--r--arch/arm/mach-s3c2410/Makefile36
-rw-r--r--arch/arm/mach-s3c2410/dma.c163
-rw-r--r--arch/arm/mach-sa1100/ssp.c46
-rw-r--r--arch/arm/mach-versatile/core.c2
-rw-r--r--arch/arm/mm/Kconfig13
-rw-r--r--arch/arm/vfp/vfp.h8
-rw-r--r--arch/arm/vfp/vfpdouble.c30
-rw-r--r--arch/arm/vfp/vfpmodule.c4
-rw-r--r--arch/arm/vfp/vfpsingle.c35
-rw-r--r--arch/i386/Kconfig4
-rw-r--r--arch/i386/kernel/acpi/boot.c2
-rw-r--r--arch/i386/kernel/acpi/wakeup.S5
-rw-r--r--arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c9
-rw-r--r--arch/i386/kernel/head.S14
-rw-r--r--arch/i386/kernel/irq.c5
-rw-r--r--arch/i386/kernel/setup.c32
-rw-r--r--arch/i386/kernel/traps.c29
-rw-r--r--arch/i386/pci/common.c5
-rw-r--r--arch/i386/pci/init.c8
-rw-r--r--arch/i386/pci/mmconfig.c36
-rw-r--r--arch/i386/pci/pci.h3
-rw-r--r--arch/ia64/Kconfig4
-rw-r--r--arch/ia64/hp/sim/simscsi.c3
-rw-r--r--arch/ia64/kernel/acpi.c2
-rw-r--r--arch/ia64/kernel/topology.c6
-rw-r--r--arch/ia64/kernel/traps.c2
-rw-r--r--arch/ia64/sn/kernel/xpc_channel.c4
-rw-r--r--arch/ia64/sn/kernel/xpc_main.c28
-rw-r--r--arch/ia64/sn/kernel/xpc_partition.c24
-rw-r--r--arch/powerpc/Kconfig20
-rw-r--r--arch/powerpc/boot/dts/mpc7448hpc2.dts190
-rw-r--r--arch/powerpc/boot/dts/mpc8349emds.dts328
-rw-r--r--arch/powerpc/configs/mpc834x_mds_defconfig (renamed from arch/powerpc/configs/mpc834x_sys_defconfig)0
-rw-r--r--arch/powerpc/kernel/fpu.S5
-rw-r--r--arch/powerpc/kernel/irq.c84
-rw-r--r--arch/powerpc/kernel/pci_64.c11
-rw-r--r--arch/powerpc/kernel/prom_init.c10
-rw-r--r--arch/powerpc/kernel/prom_parse.c15
-rw-r--r--arch/powerpc/kernel/smp-tbsync.c5
-rw-r--r--arch/powerpc/kernel/time.c25
-rw-r--r--arch/powerpc/kernel/traps.c2
-rw-r--r--arch/powerpc/lib/memcpy_64.S11
-rw-r--r--arch/powerpc/platforms/83xx/mpc834x_itx.c49
-rw-r--r--arch/powerpc/platforms/83xx/mpc834x_sys.c56
-rw-r--r--arch/powerpc/platforms/83xx/mpc83xx.h1
-rw-r--r--arch/powerpc/platforms/83xx/pci.c9
-rw-r--r--arch/powerpc/platforms/embedded6xx/Kconfig1
-rw-r--r--arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c2
-rw-r--r--arch/powerpc/sysdev/Makefile4
-rw-r--r--arch/powerpc/sysdev/ipic.c303
-rw-r--r--arch/powerpc/sysdev/ipic.h23
-rw-r--r--arch/powerpc/sysdev/mpic.c223
-rw-r--r--arch/ppc/kernel/smp-tbsync.c7
-rw-r--r--arch/ppc/platforms/85xx/mpc8560_ads.c89
-rw-r--r--arch/ppc/platforms/85xx/mpc85xx_ads_common.h19
-rw-r--r--arch/ppc/platforms/mpc8272ads_setup.c154
-rw-r--r--arch/ppc/platforms/mpc866ads_setup.c192
-rw-r--r--arch/ppc/platforms/mpc885ads_setup.c175
-rw-r--r--arch/ppc/platforms/pq2ads_pd.h82
-rw-r--r--arch/ppc/syslib/Makefile2
-rw-r--r--arch/ppc/syslib/ipic.c646
-rw-r--r--arch/ppc/syslib/ipic.h47
-rw-r--r--arch/ppc/syslib/mpc85xx_devices.c89
-rw-r--r--arch/ppc/syslib/mpc8xx_devices.c8
-rw-r--r--arch/ppc/syslib/mpc8xx_sys.c6
-rw-r--r--arch/ppc/syslib/pq2_devices.c5
-rw-r--r--arch/ppc/syslib/pq2_sys.c3
-rw-r--r--arch/s390/appldata/appldata_base.c2
-rw-r--r--arch/s390/mm/init.c6
-rw-r--r--arch/sparc/kernel/setup.c4
-rw-r--r--arch/sparc/kernel/smp.c1
-rw-r--r--arch/sparc/kernel/sun4d_smp.c2
-rw-r--r--arch/sparc/kernel/sun4m_smp.c2
-rw-r--r--arch/sparc64/mm/generic.c2
-rw-r--r--arch/x86_64/defconfig66
-rw-r--r--arch/x86_64/ia32/ia32_binfmt.c57
-rw-r--r--arch/x86_64/kernel/e820.c35
-rw-r--r--arch/x86_64/kernel/entry.S3
-rw-r--r--arch/x86_64/kernel/head.S1
-rw-r--r--arch/x86_64/kernel/init_task.c5
-rw-r--r--arch/x86_64/kernel/setup.c6
-rw-r--r--arch/x86_64/kernel/setup64.c3
-rw-r--r--arch/x86_64/kernel/traps.c32
-rw-r--r--arch/x86_64/pci/mmconfig.c34
-rw-r--r--arch/xtensa/kernel/traps.c2
-rw-r--r--block/cfq-iosched.c2
-rw-r--r--block/elevator.c3
-rw-r--r--block/ll_rw_blk.c2
-rw-r--r--drivers/acpi/ac.c2
-rw-r--r--drivers/acpi/acpi_memhotplug.c8
-rw-r--r--drivers/acpi/battery.c3
-rw-r--r--drivers/acpi/bus.c11
-rw-r--r--drivers/acpi/hotkey.c281
-rw-r--r--drivers/acpi/i2c_ec.c2
-rw-r--r--drivers/acpi/osl.c10
-rw-r--r--drivers/acpi/sbs.c3
-rw-r--r--drivers/acpi/scan.c12
-rw-r--r--drivers/acpi/utils.c2
-rw-r--r--drivers/base/node.c2
-rw-r--r--drivers/cdrom/gscd.c2
-rw-r--r--drivers/char/drm/radeon_state.c9
-rw-r--r--drivers/char/moxa.c8
-rw-r--r--drivers/char/tty_io.c808
-rw-r--r--drivers/char/tty_ioctl.c59
-rw-r--r--drivers/char/vt_ioctl.c2
-rw-r--r--drivers/char/watchdog/Kconfig8
-rw-r--r--drivers/hwmon/abituguru.c99
-rw-r--r--drivers/i2c/chips/tps65010.c12
-rw-r--r--drivers/ide/pci/generic.c30
-rw-r--r--drivers/ide/pci/via82cxxx.c3
-rw-r--r--drivers/ieee1394/ohci1394.c4
-rw-r--r--drivers/infiniband/core/cache.c3
-rw-r--r--drivers/infiniband/core/sa_query.c3
-rw-r--r--drivers/infiniband/hw/mthca/mthca_main.c6
-rw-r--r--drivers/infiniband/hw/mthca/mthca_provider.c11
-rw-r--r--drivers/infiniband/hw/mthca/mthca_provider.h4
-rw-r--r--drivers/infiniband/hw/mthca/mthca_qp.c54
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.c22
-rw-r--r--drivers/input/keyboard/atkbd.c2
-rw-r--r--drivers/input/misc/wistron_btns.c16
-rw-r--r--drivers/input/mouse/psmouse-base.c7
-rw-r--r--drivers/md/dm-mpath.c3
-rw-r--r--drivers/md/dm-raid1.c4
-rw-r--r--drivers/md/md.c13
-rw-r--r--drivers/md/raid1.c7
-rw-r--r--drivers/media/dvb/bt8xx/dst.c58
-rw-r--r--drivers/media/dvb/dvb-core/Makefile6
-rw-r--r--drivers/media/radio/Kconfig12
-rw-r--r--drivers/media/radio/Makefile1
-rw-r--r--drivers/media/radio/dsbr100.c (renamed from drivers/media/video/dsbr100.c)0
-rw-r--r--drivers/media/video/Kconfig12
-rw-r--r--drivers/media/video/Makefile2
-rw-r--r--drivers/media/video/compat_ioctl32.c32
-rw-r--r--drivers/media/video/cx25840/cx25840-core.c4
-rw-r--r--drivers/media/video/cx88/cx88-video.c4
-rw-r--r--drivers/media/video/msp3400-kthreads.c4
-rw-r--r--drivers/media/video/pwc/Kconfig2
-rw-r--r--drivers/media/video/pwc/pwc-if.c1
-rw-r--r--drivers/media/video/saa7134/saa7134-video.c2
-rw-r--r--drivers/media/video/tuner-types.c14
-rw-r--r--drivers/media/video/v4l1-compat.c4
-rw-r--r--drivers/media/video/v4l2-common.c6
-rw-r--r--drivers/media/video/videodev.c2
-rw-r--r--drivers/media/video/vivi.c4
-rw-r--r--drivers/message/fusion/mptbase.h1
-rw-r--r--drivers/message/fusion/mptfc.c92
-rw-r--r--drivers/mmc/mmc_queue.c3
-rw-r--r--drivers/mmc/wbsd.c9
-rw-r--r--drivers/mtd/nand/ams-delta.c10
-rw-r--r--drivers/mtd/nand/nand_base.c6
-rw-r--r--drivers/net/3c515.c3
-rw-r--r--drivers/net/82596.c9
-rw-r--r--drivers/net/Kconfig41
-rw-r--r--drivers/net/Makefile3
-rw-r--r--drivers/net/ac3200.c3
-rw-r--r--drivers/net/appletalk/cops.c2
-rw-r--r--drivers/net/at1700.c2
-rw-r--r--drivers/net/bnx2.c49
-rw-r--r--drivers/net/bnx2.h12
-rw-r--r--drivers/net/cs89x0.c3
-rw-r--r--drivers/net/dm9000.c14
-rw-r--r--drivers/net/e100.c6
-rw-r--r--drivers/net/e1000/e1000_hw.c89
-rw-r--r--drivers/net/e1000/e1000_hw.h32
-rw-r--r--drivers/net/e1000/e1000_main.c2
-rw-r--r--drivers/net/e2100.c4
-rw-r--r--drivers/net/eepro.c3
-rw-r--r--drivers/net/eexpress.c2
-rw-r--r--drivers/net/es3210.c3
-rw-r--r--drivers/net/eth16i.c2
-rw-r--r--drivers/net/fealnx.c2
-rw-r--r--drivers/net/fs_enet/Makefile6
-rw-r--r--drivers/net/fs_enet/fec.h42
-rw-r--r--drivers/net/fs_enet/fs_enet-main.c207
-rw-r--r--drivers/net/fs_enet/fs_enet-mii.c505
-rw-r--r--drivers/net/fs_enet/fs_enet.h40
-rw-r--r--drivers/net/fs_enet/mac-fcc.c32
-rw-r--r--drivers/net/fs_enet/mac-fec.c142
-rw-r--r--drivers/net/fs_enet/mac-scc.c4
-rw-r--r--drivers/net/fs_enet/mii-bitbang.c448
-rw-r--r--drivers/net/fs_enet/mii-fec.c243
-rw-r--r--drivers/net/fs_enet/mii-fixed.c91
-rw-r--r--drivers/net/lance.c2
-rw-r--r--drivers/net/lne390.c2
-rw-r--r--drivers/net/myri10ge/myri10ge.c2
-rw-r--r--drivers/net/ni52.c2
-rw-r--r--drivers/net/ni65.c2
-rw-r--r--drivers/net/pcmcia/xirc2ps_cs.c18
-rw-r--r--drivers/net/pcnet32.c25
-rw-r--r--drivers/net/phy/Kconfig17
-rw-r--r--drivers/net/phy/Makefile1
-rw-r--r--drivers/net/phy/fixed.c358
-rw-r--r--drivers/net/phy/mdio_bus.c1
-rw-r--r--drivers/net/phy/phy_device.c51
-rw-r--r--drivers/net/ppp_generic.c30
-rw-r--r--drivers/net/s2io.c1
-rw-r--r--drivers/net/seeq8005.c2
-rw-r--r--drivers/net/skge.c2
-rw-r--r--drivers/net/sky2.c5
-rw-r--r--drivers/net/smc911x.c1
-rw-r--r--drivers/net/smc91x.c8
-rw-r--r--drivers/net/smc91x.h29
-rw-r--r--drivers/net/spider_net.c12
-rw-r--r--drivers/net/spider_net.h3
-rw-r--r--drivers/net/spider_net_ethtool.c13
-rw-r--r--drivers/net/sundance.c2
-rw-r--r--drivers/net/sunlance.c27
-rw-r--r--drivers/net/tg3.c51
-rw-r--r--drivers/net/tg3.h8
-rw-r--r--drivers/net/tokenring/ibmtr.c4
-rw-r--r--drivers/net/tokenring/smctr.c5
-rw-r--r--drivers/net/tulip/winbond-840.c2
-rw-r--r--drivers/net/tulip/xircom_cb.c3
-rw-r--r--drivers/net/ucc_geth.c4278
-rw-r--r--drivers/net/ucc_geth.h1339
-rw-r--r--drivers/net/ucc_geth_phy.c801
-rw-r--r--drivers/net/ucc_geth_phy.h217
-rw-r--r--drivers/net/via-rhine.c90
-rw-r--r--drivers/net/wan/c101.c9
-rw-r--r--drivers/net/wd.c4
-rw-r--r--drivers/net/wireless/hostap/hostap_hw.c3
-rw-r--r--drivers/net/wireless/spectrum_cs.c2
-rw-r--r--drivers/net/wireless/strip.c6
-rw-r--r--drivers/pci/hotplug/Kconfig9
-rw-r--r--drivers/pci/hotplug/cpci_hotplug_pci.c54
-rw-r--r--drivers/pci/hotplug/pciehp.h5
-rw-r--r--drivers/pci/hotplug/pciehp_hpc.c4
-rw-r--r--drivers/pci/pci-driver.c3
-rw-r--r--drivers/pci/quirks.c59
-rw-r--r--drivers/rtc/rtc-s3c.c124
-rw-r--r--drivers/s390/block/dasd.c2
-rw-r--r--drivers/s390/block/dasd_devmap.c86
-rw-r--r--drivers/s390/block/dasd_eckd.c22
-rw-r--r--drivers/s390/block/xpram.c25
-rw-r--r--drivers/s390/char/tape_class.c2
-rw-r--r--drivers/s390/cio/device_fsm.c1
-rw-r--r--drivers/s390/cio/device_ops.c3
-rw-r--r--drivers/s390/net/qeth_main.c8
-rw-r--r--drivers/s390/scsi/zfcp_aux.c120
-rw-r--r--drivers/s390/scsi/zfcp_ccw.c5
-rw-r--r--drivers/s390/scsi/zfcp_def.h15
-rw-r--r--drivers/s390/scsi/zfcp_erp.c212
-rw-r--r--drivers/s390/scsi/zfcp_ext.h9
-rw-r--r--drivers/s390/scsi/zfcp_fsf.c122
-rw-r--r--drivers/s390/scsi/zfcp_qdio.c79
-rw-r--r--drivers/s390/scsi/zfcp_scsi.c73
-rw-r--r--drivers/scsi/arm/Kconfig3
-rw-r--r--drivers/scsi/arm/scsi.h2
-rw-r--r--drivers/scsi/ata_piix.c81
-rw-r--r--drivers/scsi/esp.c3
-rw-r--r--drivers/scsi/hptiop.c568
-rw-r--r--drivers/scsi/ide-scsi.c2
-rw-r--r--drivers/scsi/iscsi_tcp.c209
-rw-r--r--drivers/scsi/iscsi_tcp.h2
-rw-r--r--drivers/scsi/libata-core.c36
-rw-r--r--drivers/scsi/libata-scsi.c13
-rw-r--r--drivers/scsi/libiscsi.c214
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c101
-rw-r--r--drivers/scsi/lpfc/lpfc_crtn.h1
-rw-r--r--drivers/scsi/lpfc/lpfc_ct.c13
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c21
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c15
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c13
-rw-r--r--drivers/scsi/lpfc/lpfc_mbox.c16
-rw-r--r--drivers/scsi/lpfc/lpfc_nportdisc.c24
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c21
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c57
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.h20
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h2
-rw-r--r--drivers/scsi/megaraid/mega_common.h6
-rw-r--r--drivers/scsi/megaraid/megaraid_ioctl.h4
-rw-r--r--drivers/scsi/megaraid/megaraid_mbox.c42
-rw-r--r--drivers/scsi/megaraid/megaraid_mbox.h4
-rw-r--r--drivers/scsi/megaraid/megaraid_mm.c2
-rw-r--r--drivers/scsi/megaraid/megaraid_mm.h4
-rw-r--r--drivers/scsi/pdc_adma.c3
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h1
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c11
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c1
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c5
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c15
-rw-r--r--drivers/scsi/qla2xxx/qla_version.h4
-rw-r--r--drivers/scsi/sata_sil24.c1
-rw-r--r--drivers/scsi/sata_via.c117
-rw-r--r--drivers/scsi/scsi_error.c18
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c15
-rw-r--r--drivers/scsi/sg.c8
-rw-r--r--drivers/scsi/sym53c8xx_2/sym_glue.c2
-rw-r--r--drivers/serial/8250_pci.c14
-rw-r--r--drivers/serial/sunsab.c9
-rw-r--r--drivers/serial/sunzilog.c3
-rw-r--r--drivers/usb/host/ohci-au1xxx.c1
-rw-r--r--drivers/usb/input/appletouch.c2
-rw-r--r--drivers/usb/misc/cypress_cy7c63.c2
-rw-r--r--drivers/usb/misc/usbtest.c5
-rw-r--r--drivers/usb/serial/ftdi_sio.c2
-rw-r--r--drivers/usb/serial/ftdi_sio.h6
-rw-r--r--drivers/usb/serial/ipaq.c2
-rw-r--r--drivers/usb/serial/pl2303.c1
-rw-r--r--drivers/usb/serial/pl2303.h4
-rw-r--r--drivers/usb/storage/unusual_devs.h12
-rw-r--r--drivers/video/Kconfig2
-rw-r--r--drivers/video/aty/aty128fb.c5
-rw-r--r--drivers/video/aty/atyfb_base.c5
-rw-r--r--drivers/video/imacfb.c49
-rw-r--r--drivers/video/matrox/g450_pll.c8
-rw-r--r--drivers/video/nvidia/nv_backlight.c4
-rw-r--r--drivers/video/riva/fbdev.c5
-rw-r--r--fs/adfs/super.c2
-rw-r--r--fs/block_dev.c114
-rw-r--r--fs/cifs/CHANGES10
-rw-r--r--fs/cifs/README2
-rw-r--r--fs/cifs/cifsencrypt.c3
-rw-r--r--fs/cifs/cifsfs.c6
-rw-r--r--fs/cifs/cifsfs.h2
-rw-r--r--fs/cifs/cifsglob.h18
-rw-r--r--fs/cifs/cifsproto.h4
-rw-r--r--fs/cifs/cifssmb.c28
-rw-r--r--fs/cifs/connect.c32
-rw-r--r--fs/cifs/dir.c4
-rw-r--r--fs/cifs/file.c97
-rw-r--r--fs/cifs/netmisc.c1
-rw-r--r--fs/cifs/readdir.c2
-rw-r--r--fs/cifs/sess.c2
-rw-r--r--fs/cifs/smberr.h1
-rw-r--r--fs/cifs/transport.c618
-rw-r--r--fs/cifs/xattr.c6
-rw-r--r--fs/eventpoll.c4
-rw-r--r--fs/exec.c10
-rw-r--r--fs/ext2/super.c2
-rw-r--r--fs/ext3/balloc.c6
-rw-r--r--fs/fuse/file.c10
-rw-r--r--fs/ioprio.c30
-rw-r--r--fs/jbd/commit.c6
-rw-r--r--fs/jbd/journal.c92
-rw-r--r--fs/jbd/transaction.c9
-rw-r--r--fs/jfs/inode.c16
-rw-r--r--fs/jfs/jfs_inode.h1
-rw-r--r--fs/jfs/super.c118
-rw-r--r--fs/lockd/svcsubs.c15
-rw-r--r--fs/locks.c6
-rw-r--r--fs/minix/inode.c13
-rw-r--r--fs/namei.c11
-rw-r--r--fs/nfs/file.c8
-rw-r--r--fs/nfs/idmap.c4
-rw-r--r--fs/nfs/nfs4proc.c29
-rw-r--r--fs/nfs/nfs4xdr.c21
-rw-r--r--fs/nfs/read.c23
-rw-r--r--fs/ocfs2/dlm/dlmmaster.c1
-rw-r--r--fs/ocfs2/dlm/dlmunlock.c43
-rw-r--r--fs/ocfs2/localalloc.c8
-rw-r--r--fs/ocfs2/ocfs2.h2
-rw-r--r--fs/ocfs2/suballoc.c261
-rw-r--r--fs/ocfs2/suballoc.h2
-rw-r--r--fs/ocfs2/super.c8
-rw-r--r--fs/partitions/sun.c2
-rw-r--r--fs/proc/proc_misc.c2
-rw-r--r--fs/reiserfs/xattr.c2
-rw-r--r--fs/udf/super.c9
-rw-r--r--fs/udf/truncate.c64
-rw-r--r--fs/ufs/inode.c35
-rw-r--r--fs/ufs/truncate.c77
-rw-r--r--fs/xfs/xfs_alloc.c103
-rw-r--r--fs/xfs/xfs_bmap.c2
-rw-r--r--include/asm-arm/arch-pxa/ssp.h4
-rw-r--r--include/asm-arm/arch-s3c2410/dma.h1
-rw-r--r--include/asm-arm/arch-s3c2410/regs-rtc.h2
-rw-r--r--include/asm-arm/hardware/ssp.h4
-rw-r--r--include/asm-arm/io.h7
-rw-r--r--include/asm-arm/procinfo.h1
-rw-r--r--include/asm-i386/alternative.h20
-rw-r--r--include/asm-i386/mmzone.h2
-rw-r--r--include/asm-i386/rwlock.h14
-rw-r--r--include/asm-i386/spinlock.h17
-rw-r--r--include/asm-i386/unistd.h4
-rw-r--r--include/asm-i386/unwind.h1
-rw-r--r--include/asm-ia64/sn/sn_sal.h6
-rw-r--r--include/asm-ia64/sn/xp.h22
-rw-r--r--include/asm-ia64/sn/xpc.h4
-rw-r--r--include/asm-powerpc/io.h7
-rw-r--r--include/asm-powerpc/ipic.h12
-rw-r--r--include/asm-powerpc/mpic.h125
-rw-r--r--include/asm-powerpc/prom.h4
-rw-r--r--include/asm-powerpc/time.h4
-rw-r--r--include/asm-ppc/cpm2.h95
-rw-r--r--include/asm-ppc/mpc8260.h1
-rw-r--r--include/asm-ppc/mpc8xx.h1
-rw-r--r--include/asm-sparc64/pgtable.h2
-rw-r--r--include/asm-x86_64/alternative.h21
-rw-r--r--include/asm-x86_64/processor.h6
-rw-r--r--include/asm-x86_64/spinlock.h11
-rw-r--r--include/asm-x86_64/unistd.h11
-rw-r--r--include/asm-x86_64/unwind.h1
-rw-r--r--include/linux/compat_ioctl.h1
-rw-r--r--include/linux/fb.h2
-rw-r--r--include/linux/fs.h3
-rw-r--r--include/linux/fs_enet_pd.h50
-rw-r--r--include/linux/if_vlan.h5
-rw-r--r--include/linux/ioprio.h23
-rw-r--r--include/linux/jbd.h3
-rw-r--r--include/linux/kernel.h1
-rw-r--r--include/linux/mm.h1
-rw-r--r--include/linux/netdevice.h27
-rw-r--r--include/linux/netfilter_bridge.h14
-rw-r--r--include/linux/nfs_xdr.h2
-rw-r--r--include/linux/node.h10
-rw-r--r--include/linux/pci_ids.h3
-rw-r--r--include/linux/phy.h1
-rw-r--r--include/linux/skbuff.h19
-rw-r--r--include/linux/sunrpc/rpc_pipe_fs.h4
-rw-r--r--include/linux/sunrpc/xprt.h2
-rw-r--r--include/linux/tty.h1
-rw-r--r--include/linux/vt.h1
-rw-r--r--include/media/v4l2-dev.h2
-rw-r--r--include/net/sctp/sctp.h13
-rw-r--r--include/net/sctp/sm.h3
-rw-r--r--include/scsi/libiscsi.h19
-rw-r--r--include/scsi/scsi_transport_iscsi.h4
-rw-r--r--kernel/cpuset.c35
-rw-r--r--kernel/futex.c18
-rw-r--r--kernel/hrtimer.c2
-rw-r--r--kernel/panic.c1
-rw-r--r--kernel/sched.c4
-rw-r--r--kernel/stop_machine.c1
-rw-r--r--kernel/timer.c41
-rw-r--r--kernel/workqueue.c33
-rw-r--r--lib/ts_bm.c11
-rw-r--r--mm/swap.c20
-rw-r--r--mm/swapfile.c3
-rw-r--r--net/atm/proc.c2
-rw-r--r--net/bridge/br_forward.c10
-rw-r--r--net/bridge/br_if.c7
-rw-r--r--net/bridge/netfilter/ebt_ulog.c3
-rw-r--r--net/core/dev.c37
-rw-r--r--net/core/dst.c3
-rw-r--r--net/core/pktgen.c4
-rw-r--r--net/core/rtnetlink.c15
-rw-r--r--net/core/skbuff.c4
-rw-r--r--net/core/utils.c7
-rw-r--r--net/core/wireless.c24
-rw-r--r--net/dccp/ccids/ccid3.c153
-rw-r--r--net/dccp/ccids/ccid3.h9
-rw-r--r--net/dccp/ccids/lib/loss_interval.c36
-rw-r--r--net/dccp/ccids/lib/loss_interval.h9
-rw-r--r--net/dccp/ccids/lib/packet_history.c168
-rw-r--r--net/dccp/ccids/lib/packet_history.h17
-rw-r--r--net/dccp/ccids/lib/tfrc.h2
-rw-r--r--net/dccp/ccids/lib/tfrc_equation.c2
-rw-r--r--net/dccp/dccp.h10
-rw-r--r--net/dccp/options.c2
-rw-r--r--net/ipv4/fib_semantics.c12
-rw-r--r--net/ipv4/igmp.c38
-rw-r--r--net/ipv4/ip_output.c4
-rw-r--r--net/ipv4/netfilter/arp_tables.c30
-rw-r--r--net/ipv4/netfilter/ip_conntrack_netlink.c17
-rw-r--r--net/ipv4/netfilter/ip_tables.c36
-rw-r--r--net/ipv4/netfilter/ipt_ULOG.c5
-rw-r--r--net/ipv4/netfilter/ipt_hashlimit.c11
-rw-r--r--net/ipv4/route.c2
-rw-r--r--net/ipv4/tcp_cong.c2
-rw-r--r--net/ipv4/tcp_input.c9
-rw-r--r--net/ipv4/tcp_output.c13
-rw-r--r--net/ipv4/tcp_probe.c3
-rw-r--r--net/ipv6/addrconf.c8
-rw-r--r--net/ipv6/icmp.c13
-rw-r--r--net/ipv6/ip6_output.c2
-rw-r--r--net/ipv6/mcast.c10
-rw-r--r--net/ipv6/netfilter/ip6_tables.c34
-rw-r--r--net/ipv6/route.c4
-rw-r--r--net/ipv6/tcp_ipv6.c2
-rw-r--r--net/ipx/af_ipx.c10
-rw-r--r--net/llc/llc_sap.c3
-rw-r--r--net/netfilter/nf_conntrack_netlink.c17
-rw-r--r--net/netfilter/nfnetlink_log.c3
-rw-r--r--net/netfilter/xt_physdev.c1
-rw-r--r--net/netfilter/xt_string.c2
-rw-r--r--net/netlink/af_netlink.c14
-rw-r--r--net/sched/cls_u32.c2
-rw-r--r--net/sctp/sm_make_chunk.c30
-rw-r--r--net/sctp/sm_statefuns.c20
-rw-r--r--net/sctp/socket.c20
-rw-r--r--net/sunrpc/auth_gss/auth_gss.c3
-rw-r--r--net/sunrpc/clnt.c30
-rw-r--r--net/sunrpc/rpc_pipe.c55
-rw-r--r--net/xfrm/xfrm_policy.c27
-rw-r--r--scripts/mod/file2alias.c2
-rw-r--r--sound/oss/Kconfig6
-rw-r--r--sound/pci/Kconfig70
-rw-r--r--usr/Makefile3
521 files changed, 17259 insertions, 6141 deletions
diff --git a/CREDITS b/CREDITS
index 29be6d1fdf4..0fe904ebb7c 100644
--- a/CREDITS
+++ b/CREDITS
@@ -2209,7 +2209,7 @@ S: (address available on request)
 S: USA
 
 N: Ian McDonald
-E: iam4@cs.waikato.ac.nz
+E: ian.mcdonald@jandi.co.nz
 E: imcdnzl@gmail.com
 W: http://wand.net.nz/~iam4
 W: http://imcdnzl.blogspot.com
diff --git a/Documentation/connector/ucon.c b/Documentation/connector/ucon.c
new file mode 100644
index 00000000000..d738cde2a8d
--- /dev/null
+++ b/Documentation/connector/ucon.c
@@ -0,0 +1,206 @@
1/*
2 * ucon.c
3 *
4 * Copyright (c) 2004+ Evgeniy Polyakov <johnpol@2ka.mipt.ru>
5 *
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 */
21
22#include <asm/types.h>
23
24#include <sys/types.h>
25#include <sys/socket.h>
26#include <sys/poll.h>
27
28#include <linux/netlink.h>
29#include <linux/rtnetlink.h>
30
31#include <arpa/inet.h>
32
33#include <stdio.h>
34#include <stdlib.h>
35#include <unistd.h>
36#include <string.h>
37#include <errno.h>
38#include <time.h>
39
40#include <linux/connector.h>
41
42#define DEBUG
43#define NETLINK_CONNECTOR 11
44
45#ifdef DEBUG
46#define ulog(f, a...) fprintf(stdout, f, ##a)
47#else
48#define ulog(f, a...) do {} while (0)
49#endif
50
51static int need_exit;
52static __u32 seq;
53
54static int netlink_send(int s, struct cn_msg *msg)
55{
56 struct nlmsghdr *nlh;
57 unsigned int size;
58 int err;
59 char buf[128];
60 struct cn_msg *m;
61
62 size = NLMSG_SPACE(sizeof(struct cn_msg) + msg->len);
63
64 nlh = (struct nlmsghdr *)buf;
65 nlh->nlmsg_seq = seq++;
66 nlh->nlmsg_pid = getpid();
67 nlh->nlmsg_type = NLMSG_DONE;
68 nlh->nlmsg_len = NLMSG_LENGTH(size - sizeof(*nlh));
69 nlh->nlmsg_flags = 0;
70
71 m = NLMSG_DATA(nlh);
72#if 0
73 ulog("%s: [%08x.%08x] len=%u, seq=%u, ack=%u.\n",
74 __func__, msg->id.idx, msg->id.val, msg->len, msg->seq, msg->ack);
75#endif
76 memcpy(m, msg, sizeof(*m) + msg->len);
77
78 err = send(s, nlh, size, 0);
79 if (err == -1)
80 ulog("Failed to send: %s [%d].\n",
81 strerror(errno), errno);
82
83 return err;
84}
85
86int main(int argc, char *argv[])
87{
88 int s;
89 char buf[1024];
90 int len;
91 struct nlmsghdr *reply;
92 struct sockaddr_nl l_local;
93 struct cn_msg *data;
94 FILE *out;
95 time_t tm;
96 struct pollfd pfd;
97
98 if (argc < 2)
99 out = stdout;
100 else {
101 out = fopen(argv[1], "a+");
102 if (!out) {
103 ulog("Unable to open %s for writing: %s\n",
104 argv[1], strerror(errno));
105 out = stdout;
106 }
107 }
108
109 memset(buf, 0, sizeof(buf));
110
111 s = socket(PF_NETLINK, SOCK_DGRAM, NETLINK_CONNECTOR);
112 if (s == -1) {
113 perror("socket");
114 return -1;
115 }
116
117 l_local.nl_family = AF_NETLINK;
118 l_local.nl_groups = 0x123; /* bitmask of requested groups */
119 l_local.nl_pid = 0;
120
121 if (bind(s, (struct sockaddr *)&l_local, sizeof(struct sockaddr_nl)) == -1) {
122 perror("bind");
123 close(s);
124 return -1;
125 }
126
127#if 0
128 {
129 int on = 0x57; /* Additional group number */
130 setsockopt(s, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP, &on, sizeof(on));
131 }
132#endif
133 if (0) {
134 int i, j;
135
136 memset(buf, 0, sizeof(buf));
137
138 data = (struct cn_msg *)buf;
139
140 data->id.idx = 0x123;
141 data->id.val = 0x456;
142 data->seq = seq++;
143 data->ack = 0;
144 data->len = 0;
145
146 for (j=0; j<10; ++j) {
147 for (i=0; i<1000; ++i) {
148 len = netlink_send(s, data);
149 }
150
151 ulog("%d messages have been sent to %08x.%08x.\n", i, data->id.idx, data->id.val);
152 }
153
154 return 0;
155 }
156
157
158 pfd.fd = s;
159
160 while (!need_exit) {
161 pfd.events = POLLIN;
162 pfd.revents = 0;
163 switch (poll(&pfd, 1, -1)) {
164 case 0:
165 need_exit = 1;
166 break;
167 case -1:
168 if (errno != EINTR) {
169 need_exit = 1;
170 break;
171 }
172 continue;
173 }
174 if (need_exit)
175 break;
176
177 memset(buf, 0, sizeof(buf));
178 len = recv(s, buf, sizeof(buf), 0);
179 if (len == -1) {
180 perror("recv buf");
181 close(s);
182 return -1;
183 }
184 reply = (struct nlmsghdr *)buf;
185
186 switch (reply->nlmsg_type) {
187 case NLMSG_ERROR:
188 fprintf(out, "Error message received.\n");
189 fflush(out);
190 break;
191 case NLMSG_DONE:
192 data = (struct cn_msg *)NLMSG_DATA(reply);
193
194 time(&tm);
195 fprintf(out, "%.24s : [%x.%x] [%08u.%08u].\n",
196 ctime(&tm), data->id.idx, data->id.val, data->seq, data->ack);
197 fflush(out);
198 break;
199 default:
200 break;
201 }
202 }
203
204 close(s);
205 return 0;
206}
diff --git a/Documentation/cpusets.txt b/Documentation/cpusets.txt
index 159e2a0c3e8..76b44290c15 100644
--- a/Documentation/cpusets.txt
+++ b/Documentation/cpusets.txt
@@ -217,6 +217,12 @@ exclusive cpuset. Also, the use of a Linux virtual file system (vfs)
 to represent the cpuset hierarchy provides for a familiar permission
 and name space for cpusets, with a minimum of additional kernel code.
 
+The cpus file in the root (top_cpuset) cpuset is read-only.
+It automatically tracks the value of cpu_online_map, using a CPU
+hotplug notifier. If and when memory nodes can be hotplugged,
+we expect to make the mems file in the root cpuset read-only
+as well, and have it track the value of node_online_map.
+
 
 1.4 What are exclusive cpusets ?
 --------------------------------
diff --git a/Documentation/fb/imacfb.txt b/Documentation/fb/imacfb.txt
new file mode 100644
index 00000000000..759028545a7
--- /dev/null
+++ b/Documentation/fb/imacfb.txt
@@ -0,0 +1,31 @@
1
2What is imacfb?
3===============
4
5This is a generic EFI platform driver for Intel based Apple computers.
6Imacfb is only for EFI booted Intel Macs.
7
8Supported Hardware
9==================
10
11iMac 17"/20"
12Macbook
13Macbook Pro 15"/17"
14MacMini
15
16How to use it?
17==============
18
19Imacfb does not have any kind of autodetection of your machine.
 20You have to add the following kernel parameters in your elilo.conf:
21 Macbook :
22 video=imacfb:macbook
23 MacMini :
24 video=imacfb:mini
25 Macbook Pro 15", iMac 17" :
26 video=imacfb:i17
27 Macbook Pro 17", iMac 20" :
28 video=imacfb:i20
29
30--
31Edgar Hucek <gimli@dark-green.com>
diff --git a/Documentation/filesystems/00-INDEX b/Documentation/filesystems/00-INDEX
index 66fdc0744fe..16dec61d767 100644
--- a/Documentation/filesystems/00-INDEX
+++ b/Documentation/filesystems/00-INDEX
@@ -62,8 +62,8 @@ ramfs-rootfs-initramfs.txt
   - info on the 'in memory' filesystems ramfs, rootfs and initramfs.
 reiser4.txt
   - info on the Reiser4 filesystem based on dancing tree algorithms.
-relayfs.txt
-  - info on relayfs, for efficient streaming from kernel to user space.
+relay.txt
+  - info on relay, for efficient streaming from kernel to user space.
 romfs.txt
   - description of the ROMFS filesystem.
 smbfs.txt
diff --git a/Documentation/filesystems/relay.txt b/Documentation/filesystems/relay.txt
new file mode 100644
index 00000000000..d6788dae034
--- /dev/null
+++ b/Documentation/filesystems/relay.txt
@@ -0,0 +1,479 @@
1relay interface (formerly relayfs)
2==================================
3
4The relay interface provides a means for kernel applications to
5efficiently log and transfer large quantities of data from the kernel
6to userspace via user-defined 'relay channels'.
7
8A 'relay channel' is a kernel->user data relay mechanism implemented
9as a set of per-cpu kernel buffers ('channel buffers'), each
10represented as a regular file ('relay file') in user space. Kernel
11clients write into the channel buffers using efficient write
12functions; these automatically log into the current cpu's channel
13buffer. User space applications mmap() or read() from the relay files
14and retrieve the data as it becomes available. The relay files
15themselves are files created in a host filesystem, e.g. debugfs, and
16are associated with the channel buffers using the API described below.
17
18The format of the data logged into the channel buffers is completely
19up to the kernel client; the relay interface does however provide
20hooks which allow kernel clients to impose some structure on the
21buffer data. The relay interface doesn't implement any form of data
22filtering - this also is left to the kernel client. The purpose is to
23keep things as simple as possible.
24
25This document provides an overview of the relay interface API. The
26details of the function parameters are documented along with the
27functions in the relay interface code - please see that for details.
28
29Semantics
30=========
31
32Each relay channel has one buffer per CPU, each buffer has one or more
33sub-buffers. Messages are written to the first sub-buffer until it is
 34too full to contain a new message, in which case it is written to
35the next (if available). Messages are never split across sub-buffers.
36At this point, userspace can be notified so it empties the first
37sub-buffer, while the kernel continues writing to the next.
38
39When notified that a sub-buffer is full, the kernel knows how many
40bytes of it are padding i.e. unused space occurring because a complete
41message couldn't fit into a sub-buffer. Userspace can use this
42knowledge to copy only valid data.
43
44After copying it, userspace can notify the kernel that a sub-buffer
45has been consumed.
46
47A relay channel can operate in a mode where it will overwrite data not
48yet collected by userspace, and not wait for it to be consumed.
49
50The relay channel itself does not provide for communication of such
51data between userspace and kernel, allowing the kernel side to remain
52simple and not impose a single interface on userspace. It does
53provide a set of examples and a separate helper though, described
54below.
55
56The read() interface both removes padding and internally consumes the
57read sub-buffers; thus in cases where read(2) is being used to drain
58the channel buffers, special-purpose communication between kernel and
59user isn't necessary for basic operation.
60
61One of the major goals of the relay interface is to provide a low
62overhead mechanism for conveying kernel data to userspace. While the
63read() interface is easy to use, it's not as efficient as the mmap()
64approach; the example code attempts to make the tradeoff between the
65two approaches as small as possible.
66
67klog and relay-apps example code
68================================
69
70The relay interface itself is ready to use, but to make things easier,
71a couple simple utility functions and a set of examples are provided.
72
73The relay-apps example tarball, available on the relay sourceforge
74site, contains a set of self-contained examples, each consisting of a
75pair of .c files containing boilerplate code for each of the user and
76kernel sides of a relay application. When combined these two sets of
77boilerplate code provide glue to easily stream data to disk, without
78having to bother with mundane housekeeping chores.
79
80The 'klog debugging functions' patch (klog.patch in the relay-apps
81tarball) provides a couple of high-level logging functions to the
82kernel which allow writing formatted text or raw data to a channel,
83regardless of whether a channel to write into exists or not, or even
84whether the relay interface is compiled into the kernel or not. These
85functions allow you to put unconditional 'trace' statements anywhere
86in the kernel or kernel modules; only when there is a 'klog handler'
87registered will data actually be logged (see the klog and kleak
88examples for details).
89
90It is of course possible to use the relay interface from scratch,
91i.e. without using any of the relay-apps example code or klog, but
92you'll have to implement communication between userspace and kernel,
93allowing both to convey the state of buffers (full, empty, amount of
94padding). The read() interface both removes padding and internally
95consumes the read sub-buffers; thus in cases where read(2) is being
96used to drain the channel buffers, special-purpose communication
97between kernel and user isn't necessary for basic operation. Things
98such as buffer-full conditions would still need to be communicated via
99some channel though.
100
101klog and the relay-apps examples can be found in the relay-apps
102tarball on http://relayfs.sourceforge.net
103
104The relay interface user space API
105==================================
106
107The relay interface implements basic file operations for user space
108access to relay channel buffer data. Here are the file operations
109that are available and some comments regarding their behavior:
110
111open() enables user to open an _existing_ channel buffer.
112
113mmap() results in channel buffer being mapped into the caller's
114 memory space. Note that you can't do a partial mmap - you
115 must map the entire file, which is NRBUF * SUBBUFSIZE.
116
117read() read the contents of a channel buffer. The bytes read are
118 'consumed' by the reader, i.e. they won't be available
119 again to subsequent reads. If the channel is being used
120 in no-overwrite mode (the default), it can be read at any
121 time even if there's an active kernel writer. If the
122 channel is being used in overwrite mode and there are
123 active channel writers, results may be unpredictable -
124 users should make sure that all logging to the channel has
125 ended before using read() with overwrite mode. Sub-buffer
126 padding is automatically removed and will not be seen by
127 the reader.
128
129sendfile() transfer data from a channel buffer to an output file
130 descriptor. Sub-buffer padding is automatically removed
131 and will not be seen by the reader.
132
133poll() POLLIN/POLLRDNORM/POLLERR supported. User applications are
134 notified when sub-buffer boundaries are crossed.
135
136close() decrements the channel buffer's refcount. When the refcount
137 reaches 0, i.e. when no process or kernel client has the
138 buffer open, the channel buffer is freed.
139
140In order for a user application to make use of relay files, the
141host filesystem must be mounted. For example,
142
143 mount -t debugfs debugfs /debug
144
145NOTE: the host filesystem doesn't need to be mounted for kernel
146 clients to create or use channels - it only needs to be
147 mounted when user space applications need access to the buffer
148 data.
149
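As a rough example, a minimal user space consumer that drains one per-cpu
channel buffer with read(2) might look like the sketch below.  It assumes
the channel was created with base filename "cpu" and that the host
filesystem (debugfs) is mounted at /debug as in the example above; the
path and buffer size are illustrative only.

#include <stdio.h>
#include <unistd.h>
#include <fcntl.h>

int main(void)
{
	char buf[4096];
	ssize_t n;
	/* illustrative path: base filename "cpu", cpu 0, debugfs at /debug */
	int fd = open("/debug/cpu0", O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* read() consumes the data and strips sub-buffer padding */
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, n, stdout);

	close(fd);
	return 0;
}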
150
151The relay interface kernel API
152==============================
153
154Here's a summary of the API the relay interface provides to in-kernel clients:
155
156TBD(curr. line MT:/API/)
157 channel management functions:
158
159 relay_open(base_filename, parent, subbuf_size, n_subbufs,
160 callbacks)
161 relay_close(chan)
162 relay_flush(chan)
163 relay_reset(chan)
164
165 channel management typically called on instigation of userspace:
166
167 relay_subbufs_consumed(chan, cpu, subbufs_consumed)
168
169 write functions:
170
171 relay_write(chan, data, length)
172 __relay_write(chan, data, length)
173 relay_reserve(chan, length)
174
175 callbacks:
176
177 subbuf_start(buf, subbuf, prev_subbuf, prev_padding)
178 buf_mapped(buf, filp)
179 buf_unmapped(buf, filp)
180 create_buf_file(filename, parent, mode, buf, is_global)
181 remove_buf_file(dentry)
182
183 helper functions:
184
185 relay_buf_full(buf)
186 subbuf_start_reserve(buf, length)
187
188
189Creating a channel
190------------------
191
192relay_open() is used to create a channel, along with its per-cpu
193channel buffers. Each channel buffer will have an associated file
 194created for it in the host filesystem, which can be mmapped or
195read from in user space. The files are named basename0...basenameN-1
196where N is the number of online cpus, and by default will be created
197in the root of the filesystem (if the parent param is NULL). If you
198want a directory structure to contain your relay files, you should
199create it using the host filesystem's directory creation function,
200e.g. debugfs_create_dir(), and pass the parent directory to
201relay_open(). Users are responsible for cleaning up any directory
202structure they create, when the channel is closed - again the host
203filesystem's directory removal functions should be used for that,
204e.g. debugfs_remove().
205
206In order for a channel to be created and the host filesystem's files
207associated with its channel buffers, the user must provide definitions
208for two callback functions, create_buf_file() and remove_buf_file().
209create_buf_file() is called once for each per-cpu buffer from
210relay_open() and allows the user to create the file which will be used
211to represent the corresponding channel buffer. The callback should
212return the dentry of the file created to represent the channel buffer.
213remove_buf_file() must also be defined; it's responsible for deleting
214the file(s) created in create_buf_file() and is called during
215relay_close().
216
217Here are some typical definitions for these callbacks, in this case
218using debugfs:
219
220/*
221 * create_buf_file() callback. Creates relay file in debugfs.
222 */
223static struct dentry *create_buf_file_handler(const char *filename,
224 struct dentry *parent,
225 int mode,
226 struct rchan_buf *buf,
227 int *is_global)
228{
229 return debugfs_create_file(filename, mode, parent, buf,
230 &relay_file_operations);
231}
232
233/*
234 * remove_buf_file() callback. Removes relay file from debugfs.
235 */
236static int remove_buf_file_handler(struct dentry *dentry)
237{
238 debugfs_remove(dentry);
239
240 return 0;
241}
242
243/*
244 * relay interface callbacks
245 */
246static struct rchan_callbacks relay_callbacks =
247{
248 .create_buf_file = create_buf_file_handler,
249 .remove_buf_file = remove_buf_file_handler,
250};
251
252And an example relay_open() invocation using them:
253
254 chan = relay_open("cpu", NULL, SUBBUF_SIZE, N_SUBBUFS, &relay_callbacks);
255
256If the create_buf_file() callback fails, or isn't defined, channel
257creation and thus relay_open() will fail.
258
259The total size of each per-cpu buffer is calculated by multiplying the
260number of sub-buffers by the sub-buffer size passed into relay_open().
261The idea behind sub-buffers is that they're basically an extension of
262double-buffering to N buffers, and they also allow applications to
263easily implement random-access-on-buffer-boundary schemes, which can
264be important for some high-volume applications. The number and size
265of sub-buffers is completely dependent on the application and even for
266the same application, different conditions will warrant different
267values for these parameters at different times. Typically, the right
268values to use are best decided after some experimentation; in general,
269though, it's safe to assume that having only 1 sub-buffer is a bad
270idea - you're guaranteed to either overwrite data or lose events
271depending on the channel mode being used.
272
273The create_buf_file() implementation can also be defined in such a way
274as to allow the creation of a single 'global' buffer instead of the
275default per-cpu set. This can be useful for applications interested
276mainly in seeing the relative ordering of system-wide events without
277the need to bother with saving explicit timestamps for the purpose of
278merging/sorting per-cpu files in a postprocessing step.
279
280To have relay_open() create a global buffer, the create_buf_file()
281implementation should set the value of the is_global outparam to a
282non-zero value in addition to creating the file that will be used to
283represent the single buffer. In the case of a global buffer,
284create_buf_file() and remove_buf_file() will be called only once. The
285normal channel-writing functions, e.g. relay_write(), can still be
286used - writes from any cpu will transparently end up in the global
287buffer - but since it is a global buffer, callers should make sure
288they use the proper locking for such a buffer, either by wrapping
289writes in a spinlock, or by copying a write function from relay.h and
290creating a local version that internally does the proper locking.
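
For example, a create_buf_file() implementation requesting a global
buffer might look something like the debugfs handler shown earlier, with
the is_global outparam set; the "global" filename used here is purely
illustrative:

static struct dentry *create_global_buf_file_handler(const char *filename,
						     struct dentry *parent,
						     int mode,
						     struct rchan_buf *buf,
						     int *is_global)
{
	/* request a single system-wide buffer instead of per-cpu files */
	*is_global = 1;

	return debugfs_create_file("global", mode, parent, buf,
				   &relay_file_operations);
}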
291
292Channel 'modes'
293---------------
294
295relay channels can be used in either of two modes - 'overwrite' or
296'no-overwrite'. The mode is entirely determined by the implementation
297of the subbuf_start() callback, as described below. The default if no
298subbuf_start() callback is defined is 'no-overwrite' mode. If the
299default mode suits your needs, and you plan to use the read()
300interface to retrieve channel data, you can ignore the details of this
301section, as it pertains mainly to mmap() implementations.
302
303In 'overwrite' mode, also known as 'flight recorder' mode, writes
304continuously cycle around the buffer and will never fail, but will
305unconditionally overwrite old data regardless of whether it's actually
306been consumed. In no-overwrite mode, writes will fail, i.e. data will
307be lost, if the number of unconsumed sub-buffers equals the total
308number of sub-buffers in the channel. It should be clear that if
309there is no consumer or if the consumer can't consume sub-buffers fast
310enough, data will be lost in either case; the only difference is
311whether data is lost from the beginning or the end of a buffer.
312
 313As explained above, a relay channel is made up of one or more
314per-cpu channel buffers, each implemented as a circular buffer
315subdivided into one or more sub-buffers. Messages are written into
316the current sub-buffer of the channel's current per-cpu buffer via the
317write functions described below. Whenever a message can't fit into
318the current sub-buffer, because there's no room left for it, the
319client is notified via the subbuf_start() callback that a switch to a
320new sub-buffer is about to occur. The client uses this callback to 1)
321initialize the next sub-buffer if appropriate 2) finalize the previous
322sub-buffer if appropriate and 3) return a boolean value indicating
323whether or not to actually move on to the next sub-buffer.
324
325To implement 'no-overwrite' mode, the userspace client would provide
326an implementation of the subbuf_start() callback something like the
327following:
328
329static int subbuf_start(struct rchan_buf *buf,
330 void *subbuf,
331 void *prev_subbuf,
332 unsigned int prev_padding)
333{
334 if (prev_subbuf)
335 *((unsigned *)prev_subbuf) = prev_padding;
336
337 if (relay_buf_full(buf))
338 return 0;
339
340 subbuf_start_reserve(buf, sizeof(unsigned int));
341
342 return 1;
343}
344
345If the current buffer is full, i.e. all sub-buffers remain unconsumed,
346the callback returns 0 to indicate that the buffer switch should not
347occur yet, i.e. until the consumer has had a chance to read the
348current set of ready sub-buffers. For the relay_buf_full() function
 349to make sense, the consumer is responsible for notifying the relay
350interface when sub-buffers have been consumed via
351relay_subbufs_consumed(). Any subsequent attempts to write into the
352buffer will again invoke the subbuf_start() callback with the same
353parameters; only when the consumer has consumed one or more of the
354ready sub-buffers will relay_buf_full() return 0, in which case the
355buffer switch can continue.
356
357The implementation of the subbuf_start() callback for 'overwrite' mode
358would be very similar:
359
360static int subbuf_start(struct rchan_buf *buf,
361 void *subbuf,
362 void *prev_subbuf,
363 unsigned int prev_padding)
364{
365 if (prev_subbuf)
366 *((unsigned *)prev_subbuf) = prev_padding;
367
368 subbuf_start_reserve(buf, sizeof(unsigned int));
369
370 return 1;
371}
372
373In this case, the relay_buf_full() check is meaningless and the
374callback always returns 1, causing the buffer switch to occur
375unconditionally. It's also meaningless for the client to use the
376relay_subbufs_consumed() function in this mode, as it's never
377consulted.
378
379The default subbuf_start() implementation, used if the client doesn't
380define any callbacks, or doesn't define the subbuf_start() callback,
381implements the simplest possible 'no-overwrite' mode, i.e. it does
382nothing but return 0.
383
384Header information can be reserved at the beginning of each sub-buffer
385by calling the subbuf_start_reserve() helper function from within the
386subbuf_start() callback. This reserved area can be used to store
387whatever information the client wants. In the example above, room is
388reserved in each sub-buffer to store the padding count for that
389sub-buffer. This is filled in for the previous sub-buffer in the
390subbuf_start() implementation; the padding value for the previous
391sub-buffer is passed into the subbuf_start() callback along with a
392pointer to the previous sub-buffer, since the padding value isn't
393known until a sub-buffer is filled. The subbuf_start() callback is
394also called for the first sub-buffer when the channel is opened, to
395give the client a chance to reserve space in it. In this case the
396previous sub-buffer pointer passed into the callback will be NULL, so
397the client should check the value of the prev_subbuf pointer before
398writing into the previous sub-buffer.
399
400Writing to a channel
401--------------------
402
403Kernel clients write data into the current cpu's channel buffer using
404relay_write() or __relay_write(). relay_write() is the main logging
405function - it uses local_irqsave() to protect the buffer and should be
406used if you might be logging from interrupt context. If you know
407you'll never be logging from interrupt context, you can use
408__relay_write(), which only disables preemption. These functions
409don't return a value, so you can't determine whether or not they
410failed - the assumption is that you wouldn't want to check a return
411value in the fast logging path anyway, and that they'll always succeed
412unless the buffer is full and no-overwrite mode is being used, in
413which case you can detect a failed write in the subbuf_start()
414callback by calling the relay_buf_full() helper function.
415
416relay_reserve() is used to reserve a slot in a channel buffer which
417can be written to later. This would typically be used in applications
418that need to write directly into a channel buffer without having to
419stage data in a temporary buffer beforehand. Because the actual write
420may not happen immediately after the slot is reserved, applications
421using relay_reserve() can keep a count of the number of bytes actually
422written, either in space reserved in the sub-buffers themselves or as
423a separate array. See the 'reserve' example in the relay-apps tarball
424at http://relayfs.sourceforge.net for an example of how this can be
425done. Because the write is under control of the client and is
426separated from the reserve, relay_reserve() doesn't protect the buffer
427at all - it's up to the client to provide the appropriate
428synchronization when using relay_reserve().
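
As a sketch only, a client using relay_reserve() might look something
like the following; the event layout and the use of sched_clock() for
timestamps are illustrative assumptions, and the caller is assumed to
provide its own locking around the reserve and the subsequent write:

struct my_event {		/* made-up event layout */
	u32	id;
	u64	timestamp;
};

static void log_my_event(struct rchan *chan, u32 id)
{
	struct my_event *ev;

	/* reserve space directly in the current sub-buffer; NULL means
	 * the write would fail, e.g. buffer full in no-overwrite mode */
	ev = relay_reserve(chan, sizeof(*ev));
	if (!ev)
		return;

	/* fill the reserved slot in place - no temporary staging buffer */
	ev->id = id;
	ev->timestamp = sched_clock();
}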
429
430Closing a channel
431-----------------
432
433The client calls relay_close() when it's finished using the channel.
434The channel and its associated buffers are destroyed when there are no
435longer any references to any of the channel buffers. relay_flush()
436forces a sub-buffer switch on all the channel buffers, and can be used
437to finalize and process the last sub-buffers before the channel is
438closed.
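
A typical teardown sequence, assuming 'chan' was returned by relay_open()
as in the earlier example, might simply flush and then close the channel:

static void my_channel_teardown(struct rchan *chan)
{
	/* force a sub-buffer switch so the last, partially filled
	 * sub-buffers can still be collected */
	relay_flush(chan);

	/* drop the kernel client's reference; the buffers go away once
	 * no user space or kernel reference remains */
	relay_close(chan);
}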
439
440Misc
441----
442
443Some applications may want to keep a channel around and re-use it
444rather than open and close a new channel for each use. relay_reset()
445can be used for this purpose - it resets a channel to its initial
446state without reallocating channel buffer memory or destroying
447existing mappings. It should however only be called when it's safe to
448do so, i.e. when the channel isn't currently being written to.
449
450Finally, there are a couple of utility callbacks that can be used for
451different purposes. buf_mapped() is called whenever a channel buffer
452is mmapped from user space and buf_unmapped() is called when it's
453unmapped. The client can use this notification to trigger actions
454within the kernel application, such as enabling/disabling logging to
455the channel.
456
457
458Resources
459=========
460
461For news, example code, mailing list, etc. see the relay interface homepage:
462
463 http://relayfs.sourceforge.net
464
465
466Credits
467=======
468
469The ideas and specs for the relay interface came about as a result of
470discussions on tracing involving the following:
471
472Michel Dagenais <michel.dagenais@polymtl.ca>
473Richard Moore <richardj_moore@uk.ibm.com>
474Bob Wisniewski <bob@watson.ibm.com>
475Karim Yaghmour <karim@opersys.com>
476Tom Zanussi <zanussi@us.ibm.com>
477
478Also thanks to Hubertus Franke for a lot of useful suggestions and bug
479reports.
diff --git a/Documentation/filesystems/relayfs.txt b/Documentation/filesystems/relayfs.txt
deleted file mode 100644
index 5832377b734..00000000000
--- a/Documentation/filesystems/relayfs.txt
+++ /dev/null
@@ -1,442 +0,0 @@
1
2relayfs - a high-speed data relay filesystem
3============================================
4
5relayfs is a filesystem designed to provide an efficient mechanism for
6tools and facilities to relay large and potentially sustained streams
7of data from kernel space to user space.
8
9The main abstraction of relayfs is the 'channel'. A channel consists
10of a set of per-cpu kernel buffers each represented by a file in the
11relayfs filesystem. Kernel clients write into a channel using
12efficient write functions which automatically log to the current cpu's
13channel buffer. User space applications mmap() the per-cpu files and
14retrieve the data as it becomes available.
15
16The format of the data logged into the channel buffers is completely
17up to the relayfs client; relayfs does however provide hooks which
18allow clients to impose some structure on the buffer data. Nor does
19relayfs implement any form of data filtering - this also is left to
20the client. The purpose is to keep relayfs as simple as possible.
21
22This document provides an overview of the relayfs API. The details of
23the function parameters are documented along with the functions in the
24filesystem code - please see that for details.
25
26Semantics
27=========
28
29Each relayfs channel has one buffer per CPU, each buffer has one or
30more sub-buffers. Messages are written to the first sub-buffer until
31it is too full to contain a new message, in which case it it is
32written to the next (if available). Messages are never split across
33sub-buffers. At this point, userspace can be notified so it empties
34the first sub-buffer, while the kernel continues writing to the next.
35
36When notified that a sub-buffer is full, the kernel knows how many
37bytes of it are padding i.e. unused. Userspace can use this knowledge
38to copy only valid data.
39
40After copying it, userspace can notify the kernel that a sub-buffer
41has been consumed.
42
43relayfs can operate in a mode where it will overwrite data not yet
44collected by userspace, and not wait for it to consume it.
45
46relayfs itself does not provide for communication of such data between
47userspace and kernel, allowing the kernel side to remain simple and
48not impose a single interface on userspace. It does provide a set of
49examples and a separate helper though, described below.
50
51klog and relay-apps example code
52================================
53
54relayfs itself is ready to use, but to make things easier, a couple
55simple utility functions and a set of examples are provided.
56
57The relay-apps example tarball, available on the relayfs sourceforge
58site, contains a set of self-contained examples, each consisting of a
59pair of .c files containing boilerplate code for each of the user and
60kernel sides of a relayfs application; combined these two sets of
61boilerplate code provide glue to easily stream data to disk, without
62having to bother with mundane housekeeping chores.
63
64The 'klog debugging functions' patch (klog.patch in the relay-apps
65tarball) provides a couple of high-level logging functions to the
66kernel which allow writing formatted text or raw data to a channel,
67regardless of whether a channel to write into exists or not, or
68whether relayfs is compiled into the kernel or is configured as a
69module. These functions allow you to put unconditional 'trace'
70statements anywhere in the kernel or kernel modules; only when there
71is a 'klog handler' registered will data actually be logged (see the
72klog and kleak examples for details).
73
74It is of course possible to use relayfs from scratch i.e. without
75using any of the relay-apps example code or klog, but you'll have to
76implement communication between userspace and kernel, allowing both to
77convey the state of buffers (full, empty, amount of padding).
78
79klog and the relay-apps examples can be found in the relay-apps
80tarball on http://relayfs.sourceforge.net
81
82
83The relayfs user space API
84==========================
85
86relayfs implements basic file operations for user space access to
87relayfs channel buffer data. Here are the file operations that are
88available and some comments regarding their behavior:
89
90open() enables user to open an _existing_ buffer.
91
92mmap() results in channel buffer being mapped into the caller's
93 memory space. Note that you can't do a partial mmap - you must
94 map the entire file, which is NRBUF * SUBBUFSIZE.
95
96read() read the contents of a channel buffer. The bytes read are
97 'consumed' by the reader i.e. they won't be available again
98 to subsequent reads. If the channel is being used in
99 no-overwrite mode (the default), it can be read at any time
100 even if there's an active kernel writer. If the channel is
101 being used in overwrite mode and there are active channel
102 writers, results may be unpredictable - users should make
103 sure that all logging to the channel has ended before using
104 read() with overwrite mode.
105
106poll() POLLIN/POLLRDNORM/POLLERR supported. User applications are
107 notified when sub-buffer boundaries are crossed.
108
109close() decrements the channel buffer's refcount. When the refcount
110 reaches 0 i.e. when no process or kernel client has the buffer
111 open, the channel buffer is freed.
112
113
114In order for a user application to make use of relayfs files, the
115relayfs filesystem must be mounted. For example,
116
117 mount -t relayfs relayfs /mnt/relay
118
119NOTE: relayfs doesn't need to be mounted for kernel clients to create
120 or use channels - it only needs to be mounted when user space
121 applications need access to the buffer data.
122
123
124The relayfs kernel API
125======================
126
127Here's a summary of the API relayfs provides to in-kernel clients:
128
129
130 channel management functions:
131
132 relay_open(base_filename, parent, subbuf_size, n_subbufs,
133 callbacks)
134 relay_close(chan)
135 relay_flush(chan)
136 relay_reset(chan)
137 relayfs_create_dir(name, parent)
138 relayfs_remove_dir(dentry)
139 relayfs_create_file(name, parent, mode, fops, data)
140 relayfs_remove_file(dentry)
141
142 channel management typically called on instigation of userspace:
143
144 relay_subbufs_consumed(chan, cpu, subbufs_consumed)
145
146 write functions:
147
148 relay_write(chan, data, length)
149 __relay_write(chan, data, length)
150 relay_reserve(chan, length)
151
152 callbacks:
153
154 subbuf_start(buf, subbuf, prev_subbuf, prev_padding)
155 buf_mapped(buf, filp)
156 buf_unmapped(buf, filp)
157 create_buf_file(filename, parent, mode, buf, is_global)
158 remove_buf_file(dentry)
159
160 helper functions:
161
162 relay_buf_full(buf)
163 subbuf_start_reserve(buf, length)
164
165
166Creating a channel
167------------------
168
169relay_open() is used to create a channel, along with its per-cpu
170channel buffers. Each channel buffer will have an associated file
171created for it in the relayfs filesystem, which can be opened and
172mmapped from user space if desired. The files are named
173basename0...basenameN-1 where N is the number of online cpus, and by
174default will be created in the root of the filesystem. If you want a
175directory structure to contain your relayfs files, you can create it
176with relayfs_create_dir() and pass the parent directory to
177relay_open(). Clients are responsible for cleaning up any directory
178structure they create when the channel is closed - use
179relayfs_remove_dir() for that.
180
181The total size of each per-cpu buffer is calculated by multiplying the
182number of sub-buffers by the sub-buffer size passed into relay_open().
183The idea behind sub-buffers is that they're basically an extension of
184double-buffering to N buffers, and they also allow applications to
185easily implement random-access-on-buffer-boundary schemes, which can
186be important for some high-volume applications. The number and size
187of sub-buffers is completely dependent on the application and even for
188the same application, different conditions will warrant different
189values for these parameters at different times. Typically, the right
190values to use are best decided after some experimentation; in general,
191though, it's safe to assume that having only 1 sub-buffer is a bad
192idea - you're guaranteed to either overwrite data or lose events
193depending on the channel mode being used.
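
Putting the pieces above together, a channel-creation sketch might look
like the following. The directory name, base filename, sizes and error
handling are illustrative assumptions, not requirements, and the
subbuf_start() callback referenced from the callback structure is the
one developed in the next section; the rchan_callbacks structure itself
is the one declared in the relayfs headers.

#include <linux/relayfs_fs.h>

/* 8 sub-buffers of 32KB gives a 256KB buffer per cpu - purely an example */
#define SUBBUF_SIZE	(32 * 1024)
#define N_SUBBUFS	8

static struct rchan_callbacks my_callbacks = {
	.subbuf_start	= subbuf_start,		/* defined in the next section */
};

static struct dentry *dir;
static struct rchan *chan;

static int create_channel(void)
{
	dir = relayfs_create_dir("my-app", NULL);
	if (!dir)
		return -ENOMEM;

	chan = relay_open("cpu", dir, SUBBUF_SIZE, N_SUBBUFS, &my_callbacks);
	if (!chan) {
		relayfs_remove_dir(dir);
		return -ENOMEM;
	}

	return 0;
}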
194
195Channel 'modes'
196---------------
197
198relayfs channels can be used in either of two modes - 'overwrite' or
199'no-overwrite'. The mode is entirely determined by the implementation
200of the subbuf_start() callback, as described below. In 'overwrite'
201mode, also known as 'flight recorder' mode, writes continuously cycle
202around the buffer and will never fail, but will unconditionally
203overwrite old data regardless of whether it's actually been consumed.
204In no-overwrite mode, writes will fail i.e. data will be lost, if the
205number of unconsumed sub-buffers equals the total number of
206sub-buffers in the channel. It should be clear that if there is no
207consumer or if the consumer can't consume sub-buffers fast enough,
208data will be lost in either case; the only difference is whether data
209is lost from the beginning or the end of a buffer.
210
211As explained above, a relayfs channel is made up of one or more
212per-cpu channel buffers, each implemented as a circular buffer
213subdivided into one or more sub-buffers. Messages are written into
214the current sub-buffer of the channel's current per-cpu buffer via the
215write functions described below. Whenever a message can't fit into
216the current sub-buffer, because there's no room left for it, the
217client is notified via the subbuf_start() callback that a switch to a
218new sub-buffer is about to occur. The client uses this callback to 1)
219initialize the next sub-buffer if appropriate, 2) finalize the previous
220sub-buffer if appropriate, and 3) return a boolean value indicating
221whether or not to actually go ahead with the sub-buffer switch.
222
223To implement 'no-overwrite' mode, the kernel client would provide
224an implementation of the subbuf_start() callback something like the
225following:
226
227static int subbuf_start(struct rchan_buf *buf,
228 void *subbuf,
229 void *prev_subbuf,
230 unsigned int prev_padding)
231{
232 if (prev_subbuf)
233 *((unsigned *)prev_subbuf) = prev_padding;
234
235 if (relay_buf_full(buf))
236 return 0;
237
238 subbuf_start_reserve(buf, sizeof(unsigned int));
239
240 return 1;
241}
242
243If the current buffer is full i.e. all sub-buffers remain unconsumed,
244the callback returns 0 to indicate that the buffer switch should not
245occur yet i.e. until the consumer has had a chance to read the current
246set of ready sub-buffers. For the relay_buf_full() function to make
247sense, the consumer is responsible for notifying relayfs when
248sub-buffers have been consumed via relay_subbufs_consumed(). Any
249subsequent attempts to write into the buffer will again invoke the
250subbuf_start() callback with the same parameters; only when the
251consumer has consumed one or more of the ready sub-buffers will
252relay_buf_full() return 0, in which case the buffer switch can
253continue.
254
255The implementation of the subbuf_start() callback for 'overwrite' mode
256would be very similar:
257
258static int subbuf_start(struct rchan_buf *buf,
259 void *subbuf,
260 void *prev_subbuf,
261 unsigned int prev_padding)
262{
263 if (prev_subbuf)
264 *((unsigned *)prev_subbuf) = prev_padding;
265
266 subbuf_start_reserve(buf, sizeof(unsigned int));
267
268 return 1;
269}
270
271In this case, the relay_buf_full() check is meaningless and the
272callback always returns 1, causing the buffer switch to occur
273unconditionally. It's also meaningless for the client to use the
274relay_subbufs_consumed() function in this mode, as it's never
275consulted.
276
277The default subbuf_start() implementation, used if the client doesn't
278define any callbacks, or doesn't define the subbuf_start() callback,
279implements the simplest possible 'no-overwrite' mode i.e. it does
280nothing but return 0.
281
282Header information can be reserved at the beginning of each sub-buffer
283by calling the subbuf_start_reserve() helper function from within the
284subbuf_start() callback. This reserved area can be used to store
285whatever information the client wants. In the example above, room is
286reserved in each sub-buffer to store the padding count for that
287sub-buffer. This is filled in for the previous sub-buffer in the
288subbuf_start() implementation; the padding value for the previous
289sub-buffer is passed into the subbuf_start() callback along with a
290pointer to the previous sub-buffer, since the padding value isn't
291known until a sub-buffer is filled. The subbuf_start() callback is
292also called for the first sub-buffer when the channel is opened, to
293give the client a chance to reserve space in it. In this case the
294previous sub-buffer pointer passed into the callback will be NULL, so
295the client should check the value of the prev_subbuf pointer before
296writing into the previous sub-buffer.
297
298Writing to a channel
299--------------------
300
301Kernel clients write data into the current cpu's channel buffer using
302relay_write() or __relay_write(). relay_write() is the main logging
303function - it uses local_irq_save() to protect the buffer and should be
304used if you might be logging from interrupt context. If you know
305you'll never be logging from interrupt context, you can use
306__relay_write(), which only disables preemption. These functions
307don't return a value, so you can't determine whether or not they
308failed - the assumption is that you wouldn't want to check a return
309value in the fast logging path anyway, and that they'll always succeed
310unless the buffer is full and no-overwrite mode is being used, in
311which case you can detect a failed write in the subbuf_start()
312callback by calling the relay_buf_full() helper function.
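
For example, a hypothetical client logging fixed-size records from
arbitrary (possibly interrupt) context might do no more than the
following, using the chan created in the earlier sketch. The record
layout and the use of jiffies as a timestamp are illustrative
assumptions only.

struct trace_record {
	unsigned int	event_id;
	unsigned long	timestamp;
};

static void log_event(unsigned int id)
{
	struct trace_record rec = {
		.event_id	= id,
		.timestamp	= jiffies,
	};

	/* safe in interrupt context - relay_write() protects the buffer */
	relay_write(chan, &rec, sizeof(rec));
}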
313
314relay_reserve() is used to reserve a slot in a channel buffer which
315can be written to later. This would typically be used in applications
316that need to write directly into a channel buffer without having to
317stage data in a temporary buffer beforehand. Because the actual write
318may not happen immediately after the slot is reserved, applications
319using relay_reserve() can keep a count of the number of bytes actually
320written, either in space reserved in the sub-buffers themselves or as
321a separate array. See the 'reserve' example in the relay-apps tarball
322at http://relayfs.sourceforge.net for an example of how this can be
323done. Because the write is under control of the client and is
324separated from the reserve, relay_reserve() doesn't protect the buffer
325at all - it's up to the client to provide the appropriate
326synchronization when using relay_reserve().
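
A reserve-then-fill version of the same hypothetical record, with the
caller providing its own (deliberately simplistic) locking as described
above, might look like this. Checking the return value for NULL assumes
that a failed reserve - e.g. a full buffer in no-overwrite mode - is
reported that way; verify against the relayfs headers before relying on
it.

static DEFINE_SPINLOCK(trace_lock);

static void log_event_reserved(unsigned int id)
{
	struct trace_record *rec;
	unsigned long flags;

	spin_lock_irqsave(&trace_lock, flags);
	rec = relay_reserve(chan, sizeof(*rec));
	if (rec) {
		/* fill the slot in place - no temporary buffer needed */
		rec->event_id	= id;
		rec->timestamp	= jiffies;
	}
	spin_unlock_irqrestore(&trace_lock, flags);
}

Holding the lock with interrupts disabled also keeps the caller on the
same cpu between the reserve and the write, which is what makes the
in-place fill safe in this sketch.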
327
328Closing a channel
329-----------------
330
331The client calls relay_close() when it's finished using the channel.
332The channel and its associated buffers are destroyed when there are no
333longer any references to any of the channel buffers. relay_flush()
334forces a sub-buffer switch on all the channel buffers, and can be used
335to finalize and process the last sub-buffers before the channel is
336closed.
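
A matching teardown for the creation sketch above could be as simple
as the following; chan and dir are the hypothetical globals introduced
there.

static void destroy_channel(void)
{
	if (chan) {
		relay_flush(chan);	/* finalize partially-filled sub-buffers */
		relay_close(chan);
		chan = NULL;
	}
	if (dir) {
		relayfs_remove_dir(dir);
		dir = NULL;
	}
}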
337
338Creating non-relay files
339------------------------
340
341relay_open() automatically creates files in the relayfs filesystem to
342represent the per-cpu kernel buffers. It's often useful for
343applications to be able to create their own files alongside the relay
344files in the relayfs filesystem as well, e.g. 'control' files much like
345those created in /proc or debugfs for similar purposes, which are used
346to communicate control information between the kernel and user sides
347of a relayfs application. For this purpose the relayfs_create_file() and
348relayfs_remove_file() API functions exist. For relayfs_create_file(),
349the caller passes in a set of user-defined file operations to be used
350for the file and an optional void * to a user-specified data item,
351which will be accessible via inode->u.generic_ip (see the relay-apps
352tarball for examples). The file_operations are a required parameter
353to relayfs_create_file() and thus the semantics of these files are
354completely defined by the caller.
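
As an illustration, a hypothetical write-only 'control' file that lets
user space toggle a logging flag might be created as sketched below.
The flag, file name, mode and parsing are assumptions; the file
operations are, as noted above, entirely up to the client.

static int logging_enabled;

static ssize_t enabled_write(struct file *filp, const char __user *ubuf,
			     size_t count, loff_t *ppos)
{
	char buf[16];

	if (count >= sizeof(buf))
		return -EINVAL;
	if (copy_from_user(buf, ubuf, count))
		return -EFAULT;
	buf[count] = '\0';

	logging_enabled = simple_strtoul(buf, NULL, 10) ? 1 : 0;

	return count;
}

static struct file_operations enabled_fops = {
	.owner	= THIS_MODULE,
	.write	= enabled_write,
};

static struct dentry *enabled_file;

static int create_control_file(void)
{
	/* 'dir' is the parent directory created for the relay files above */
	enabled_file = relayfs_create_file("enabled", dir, S_IWUSR,
					   &enabled_fops, NULL);
	return enabled_file ? 0 : -ENOMEM;
}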
355
356See the relay-apps tarball at http://relayfs.sourceforge.net for
357examples of how these non-relay files are meant to be used.
358
359Creating relay files in other filesystems
360-----------------------------------------
361
362By default of course, relay_open() creates relay files in the relayfs
363filesystem. Because relay_file_operations is exported, however, it's
364also possible to create and use relay files in other pseudo-filesystems
365such as debugfs.
366
367For this purpose, two callback functions are provided,
368create_buf_file() and remove_buf_file(). create_buf_file() is called
369once for each per-cpu buffer from relay_open() to allow the client to
370create a file to be used to represent the corresponding buffer; if
371this callback is not defined, the default implementation will create
372and return a file in the relayfs filesystem to represent the buffer.
373The callback should return the dentry of the file created to represent
374the relay buffer. Note that the parent directory passed to
375relay_open() (and passed along to the callback), if specified, must
376exist in the same filesystem the new relay file is created in. If
377create_buf_file() is defined, remove_buf_file() must also be defined;
378it's responsible for deleting the file(s) created in create_buf_file()
379and is called during relay_close().
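
A minimal sketch of such a pair of callbacks, creating the per-cpu
files in debugfs instead of relayfs (and assuming debugfs is available
and mounted), could look like:

static struct dentry *create_buf_file_handler(const char *filename,
					      struct dentry *parent,
					      int mode,
					      struct rchan_buf *buf,
					      int *is_global)
{
	/* expose the relay buffer through debugfs using the exported
	   relay_file_operations */
	return debugfs_create_file(filename, mode, parent, buf,
				   &relay_file_operations);
}

static int remove_buf_file_handler(struct dentry *dentry)
{
	debugfs_remove(dentry);
	return 0;
}

These would be hooked up via the create_buf_file and remove_buf_file
members of the callback structure passed to relay_open().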
380
381The create_buf_file() implementation can also be defined in such a way
382as to allow the creation of a single 'global' buffer instead of the
383default per-cpu set. This can be useful for applications interested
384mainly in seeing the relative ordering of system-wide events without
385the need to bother with saving explicit timestamps for the purpose of
386merging/sorting per-cpu files in a postprocessing step.
387
388To have relay_open() create a global buffer, the create_buf_file()
389implementation should set the value of the is_global outparam to a
390non-zero value in addition to creating the file that will be used to
391represent the single buffer. In the case of a global buffer,
392create_buf_file() and remove_buf_file() will be called only once. The
393normal channel-writing functions e.g. relay_write() can still be used
394- writes from any cpu will transparently end up in the global buffer -
395but since it is a global buffer, callers should make sure they use the
396proper locking for such a buffer, either by wrapping writes in a
397spinlock, or by copying a write function from relayfs_fs.h and
398creating a local version that internally does the proper locking.
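
The global-buffer variant of the debugfs handler sketched earlier would
differ only in setting the outparam and creating a single, fixed-name
file; the name 'trace' is again just an assumption.

static struct dentry *create_buf_file_handler(const char *filename,
					      struct dentry *parent,
					      int mode,
					      struct rchan_buf *buf,
					      int *is_global)
{
	*is_global = 1;		/* one buffer shared by all cpus */

	return debugfs_create_file("trace", mode, parent, buf,
				   &relay_file_operations);
}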
399
400See the 'exported-relayfile' examples in the relay-apps tarball for
401examples of creating and using relay files in debugfs.
402
403Misc
404----
405
406Some applications may want to keep a channel around and re-use it
407rather than open and close a new channel for each use. relay_reset()
408can be used for this purpose - it resets a channel to its initial
409state without reallocating channel buffer memory or destroying
410existing mappings. It should, however, only be called when it's safe
411to do so, i.e. when the channel isn't currently being written to.
412
413Finally, there are a couple of utility callbacks that can be used for
414different purposes. buf_mapped() is called whenever a channel buffer
415is mmapped from user space and buf_unmapped() is called when it's
416unmapped. The client can use this notification to trigger actions
417within the kernel application, such as enabling/disabling logging to
418the channel.
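
For instance, a client could count mappings and only log while at least
one reader has a buffer mapped. The flag below is the hypothetical
logging_enabled flag from the control-file sketch earlier; the counting
scheme is an assumption about how a client might want to behave.

static atomic_t mapped_count = ATOMIC_INIT(0);

static void buf_mapped_handler(struct rchan_buf *buf, struct file *filp)
{
	if (atomic_inc_return(&mapped_count) == 1)
		logging_enabled = 1;
}

static void buf_unmapped_handler(struct rchan_buf *buf, struct file *filp)
{
	if (atomic_dec_return(&mapped_count) == 0)
		logging_enabled = 0;
}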
419
420
421Resources
422=========
423
424For news, example code, mailing list, etc. see the relayfs homepage:
425
426 http://relayfs.sourceforge.net
427
428
429Credits
430=======
431
432The ideas and specs for relayfs came about as a result of discussions
433on tracing involving the following:
434
435Michel Dagenais <michel.dagenais@polymtl.ca>
436Richard Moore <richardj_moore@uk.ibm.com>
437Bob Wisniewski <bob@watson.ibm.com>
438Karim Yaghmour <karim@opersys.com>
439Tom Zanussi <zanussi@us.ibm.com>
440
441Also thanks to Hubertus Franke for a lot of useful suggestions and bug
442reports.
diff --git a/Documentation/input/joystick.txt b/Documentation/input/joystick.txt
index d53b857a371..841c353297e 100644
--- a/Documentation/input/joystick.txt
+++ b/Documentation/input/joystick.txt
@@ -39,7 +39,6 @@ them. Bug reports and success stories are also welcome.
39 39
40 The input project website is at: 40 The input project website is at:
41 41
42 http://www.suse.cz/development/input/
43 http://atrey.karlin.mff.cuni.cz/~vojtech/input/ 42 http://atrey.karlin.mff.cuni.cz/~vojtech/input/
44 43
45 There is also a mailing list for the driver at: 44 There is also a mailing list for the driver at:
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index b50595a0550..7947cede871 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1183,6 +1183,8 @@ running once the system is up.
1183 Mechanism 2. 1183 Mechanism 2.
1184 nommconf [IA-32,X86_64] Disable use of MMCONFIG for PCI 1184 nommconf [IA-32,X86_64] Disable use of MMCONFIG for PCI
1185 Configuration 1185 Configuration
1186 mmconf [IA-32,X86_64] Force MMCONFIG. This is useful
1187 to override the builtin blacklist.
1186 nomsi [MSI] If the PCI_MSI kernel config parameter is 1188 nomsi [MSI] If the PCI_MSI kernel config parameter is
1187 enabled, this kernel boot option can be used to 1189 enabled, this kernel boot option can be used to
1188 disable the use of MSI interrupts system-wide. 1190 disable the use of MSI interrupts system-wide.
diff --git a/Documentation/kobject.txt b/Documentation/kobject.txt
index 8d9bffbd192..949f7b5a205 100644
--- a/Documentation/kobject.txt
+++ b/Documentation/kobject.txt
@@ -247,7 +247,7 @@ the object-specific fields, which include:
247- default_attrs: Default attributes to be exported via sysfs when the 247- default_attrs: Default attributes to be exported via sysfs when the
248 object is registered.Note that the last attribute has to be 248 object is registered.Note that the last attribute has to be
249 initialized to NULL ! You can find a complete implementation 249 initialized to NULL ! You can find a complete implementation
250 in drivers/block/genhd.c 250 in block/genhd.c
251 251
252 252
253Instances of struct kobj_type are not registered; only referenced by 253Instances of struct kobj_type are not registered; only referenced by
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index d46338af600..3e0c017e787 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -294,15 +294,15 @@ tcp_rmem - vector of 3 INTEGERs: min, default, max
294 Default: 87380*2 bytes. 294 Default: 87380*2 bytes.
295 295
296tcp_mem - vector of 3 INTEGERs: min, pressure, max 296tcp_mem - vector of 3 INTEGERs: min, pressure, max
297 low: below this number of pages TCP is not bothered about its 297 min: below this number of pages TCP is not bothered about its
298 memory appetite. 298 memory appetite.
299 299
300 pressure: when amount of memory allocated by TCP exceeds this number 300 pressure: when amount of memory allocated by TCP exceeds this number
301 of pages, TCP moderates its memory consumption and enters memory 301 of pages, TCP moderates its memory consumption and enters memory
302 pressure mode, which is exited when memory consumption falls 302 pressure mode, which is exited when memory consumption falls
303 under "low". 303 under "min".
304 304
305 high: number of pages allowed for queueing by all TCP sockets. 305 max: number of pages allowed for queueing by all TCP sockets.
306 306
307 Defaults are calculated at boot time from amount of available 307 Defaults are calculated at boot time from amount of available
308 memory. 308 memory.
diff --git a/Documentation/powerpc/booting-without-of.txt b/Documentation/powerpc/booting-without-of.txt
index 8c48b8a27b9..5c0ba235f5a 100644
--- a/Documentation/powerpc/booting-without-of.txt
+++ b/Documentation/powerpc/booting-without-of.txt
@@ -1136,10 +1136,10 @@ Sense and level information should be encoded as follows:
1136 Devices connected to openPIC-compatible controllers should encode 1136 Devices connected to openPIC-compatible controllers should encode
1137 sense and polarity as follows: 1137 sense and polarity as follows:
1138 1138
1139 0 = high to low edge sensitive type enabled 1139 0 = low to high edge sensitive type enabled
1140 1 = active low level sensitive type enabled 1140 1 = active low level sensitive type enabled
1141 2 = low to high edge sensitive type enabled 1141 2 = active high level sensitive type enabled
1142 3 = active high level sensitive type enabled 1142 3 = high to low edge sensitive type enabled
1143 1143
1144 ISA PIC interrupt controllers should adhere to the ISA PIC 1144 ISA PIC interrupt controllers should adhere to the ISA PIC
1145 encodings listed below: 1145 encodings listed below:
diff --git a/Documentation/scsi/ChangeLog.megaraid b/Documentation/scsi/ChangeLog.megaraid
index c173806c91f..a056bbe67c7 100644
--- a/Documentation/scsi/ChangeLog.megaraid
+++ b/Documentation/scsi/ChangeLog.megaraid
@@ -1,3 +1,126 @@
1Release Date : Fri May 19 09:31:45 EST 2006 - Seokmann Ju <sju@lsil.com>
2Current Version : 2.20.4.9 (scsi module), 2.20.2.6 (cmm module)
3Older Version : 2.20.4.8 (scsi module), 2.20.2.6 (cmm module)
4
51. Fixed a bug in megaraid_init_mbox().
6 Customer reported "garbage in file on x86_64 platform".
7 Root Cause: the driver registered controllers as 64-bit DMA capable
8 for those which are not support it.
9 Fix: Made change in the function inserting identification machanism
10 identifying 64-bit DMA capable controllers.
11
12 > -----Original Message-----
13 > From: Vasily Averin [mailto:vvs@sw.ru]
14 > Sent: Thursday, May 04, 2006 2:49 PM
15 > To: linux-scsi@vger.kernel.org; Kolli, Neela; Mukker, Atul;
16 > Ju, Seokmann; Bagalkote, Sreenivas;
17 > James.Bottomley@SteelEye.com; devel@openvz.org
18 > Subject: megaraid_mbox: garbage in file
19 >
20 > Hello all,
21 >
22 > I've investigated customers claim on the unstable work of
23 > their node and found a
24 > strange effect: reading from some files leads to the
25 > "attempt to access beyond end of device" messages.
26 >
27 > I've checked filesystem, memory on the node, motherboard BIOS
28 > version, but it
29 > does not help and issue still has been reproduced by simple
30 > file reading.
31 >
32 > Reproducer is simple:
33 >
34 > echo 0xffffffff >/proc/sys/dev/scsi/logging_level ;
35 > cat /vz/private/101/root/etc/ld.so.cache >/tmp/ttt ;
36 > echo 0 >/proc/sys/dev/scsi/logging
37 >
38 > It leads to the following messages in dmesg
39 >
40 > sd_init_command: disk=sda, block=871769260, count=26
41 > sda : block=871769260
42 > sda : reading 26/26 512 byte blocks.
43 > scsi_add_timer: scmd: f79ed980, time: 7500, (c02b1420)
44 > sd 0:1:0:0: send 0xf79ed980 sd 0:1:0:0:
45 > command: Read (10): 28 00 33 f6 24 ac 00 00 1a 00
46 > buffer = 0xf7cfb540, bufflen = 13312, done = 0xc0366b40,
47 > queuecommand 0xc0344010
48 > leaving scsi_dispatch_cmnd()
49 > scsi_delete_timer: scmd: f79ed980, rtn: 1
50 > sd 0:1:0:0: done 0xf79ed980 SUCCESS 0 sd 0:1:0:0:
51 > command: Read (10): 28 00 33 f6 24 ac 00 00 1a 00
52 > scsi host busy 1 failed 0
53 > sd 0:1:0:0: Notifying upper driver of completion (result 0)
54 > sd_rw_intr: sda: res=0x0
55 > 26 sectors total, 13312 bytes done.
56 > use_sg is 4
57 > attempt to access beyond end of device
58 > sda6: rw=0, want=1044134458, limit=951401367
59 > Buffer I/O error on device sda6, logical block 522067228
60 > attempt to access beyond end of device
61
622. When INQUIRY with EVPD bit set issued to the MegaRAID controller,
63 system memory gets corrupted.
64 Root Cause: MegaRAID F/W handle the INQUIRY with EVPD bit set
65 incorrectly.
66 Fix: MegaRAID F/W has fixed the problem and being process of release,
67 soon. Meanwhile, driver will filter out the request.
68
693. One of member in the data structure of the driver leads unaligne
70 issue on 64-bit platform.
71 Customer reporeted "kernel unaligned access addrss" issue when
72 application communicates with MegaRAID HBA driver.
73 Root Cause: in uioc_t structure, one of member had misaligned and it
74 led system to display the error message.
75 Fix: A patch submitted to community from following folk.
76
77 > -----Original Message-----
78 > From: linux-scsi-owner@vger.kernel.org
79 > [mailto:linux-scsi-owner@vger.kernel.org] On Behalf Of Sakurai Hiroomi
80 > Sent: Wednesday, July 12, 2006 4:20 AM
81 > To: linux-scsi@vger.kernel.org; linux-kernel@vger.kernel.org
82 > Subject: Re: Help: strange messages from kernel on IA64 platform
83 >
84 > Hi,
85 >
86 > I saw same message.
87 >
88 > When GAM(Global Array Manager) is started, The following
89 > message output.
90 > kernel: kernel unaligned access to 0xe0000001fe1080d4,
91 > ip=0xa000000200053371
92 >
93 > The uioc structure used by ioctl is defined by packed,
94 > the allignment of each member are disturbed.
95 > In a 64 bit structure, the allignment of member doesn't fit 64 bit
96 > boundary. this causes this messages.
97 > In a 32 bit structure, we don't see the message because the allinment
98 > of member fit 32 bit boundary even if packed is specified.
99 >
100 > patch
101 > I Add 32 bit dummy member to fit 64 bit boundary. I tested.
102 > We confirmed this patch fix the problem by IA64 server.
103 >
104 > **************************************************************
105 > ****************
106 > --- linux-2.6.9/drivers/scsi/megaraid/megaraid_ioctl.h.orig
107 > 2006-04-03 17:13:03.000000000 +0900
108 > +++ linux-2.6.9/drivers/scsi/megaraid/megaraid_ioctl.h
109 > 2006-04-03 17:14:09.000000000 +0900
110 > @@ -132,6 +132,10 @@
111 > /* Driver Data: */
112 > void __user * user_data;
113 > uint32_t user_data_len;
114 > +
115 > + /* 64bit alignment */
116 > + uint32_t pad_0xBC;
117 > +
118 > mraid_passthru_t __user *user_pthru;
119 >
120 > mraid_passthru_t *pthru32;
121 > **************************************************************
122 > ****************
123
1Release Date : Mon Apr 11 12:27:22 EST 2006 - Seokmann Ju <sju@lsil.com> 124Release Date : Mon Apr 11 12:27:22 EST 2006 - Seokmann Ju <sju@lsil.com>
2Current Version : 2.20.4.8 (scsi module), 2.20.2.6 (cmm module) 125Current Version : 2.20.4.8 (scsi module), 2.20.2.6 (cmm module)
3Older Version : 2.20.4.7 (scsi module), 2.20.2.6 (cmm module) 126Older Version : 2.20.4.7 (scsi module), 2.20.2.6 (cmm module)
diff --git a/Documentation/sysctl/fs.txt b/Documentation/sysctl/fs.txt
index 0b62c62142c..5c3a5190596 100644
--- a/Documentation/sysctl/fs.txt
+++ b/Documentation/sysctl/fs.txt
@@ -25,6 +25,7 @@ Currently, these files are in /proc/sys/fs:
25- inode-state 25- inode-state
26- overflowuid 26- overflowuid
27- overflowgid 27- overflowgid
28- suid_dumpable
28- super-max 29- super-max
29- super-nr 30- super-nr
30 31
@@ -131,6 +132,25 @@ The default is 65534.
131 132
132============================================================== 133==============================================================
133 134
135suid_dumpable:
136
137This value can be used to query and set the core dump mode for setuid
138or otherwise protected/tainted binaries. The modes are
139
1400 - (default) - traditional behaviour. Any process which has changed
141 privilege levels or is execute only will not be dumped
1421 - (debug) - all processes dump core when possible. The core dump is
143 owned by the current user and no security is applied. This is
144 intended for system debugging situations only. Ptrace is unchecked.
1452 - (suidsafe) - any binary which normally would not be dumped is dumped
146 readable by root only. This allows the end user to remove
147 such a dump but not access it directly. For security reasons
148 core dumps in this mode will not overwrite one another or
149 other files. This mode is appropriate when adminstrators are
150 attempting to debug problems in a normal environment.
151
152==============================================================
153
134super-max & super-nr: 154super-max & super-nr:
135 155
136These numbers control the maximum number of superblocks, and 156These numbers control the maximum number of superblocks, and
diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt
index 7345c338080..89bf8c20a58 100644
--- a/Documentation/sysctl/kernel.txt
+++ b/Documentation/sysctl/kernel.txt
@@ -50,7 +50,6 @@ show up in /proc/sys/kernel:
50- shmmax [ sysv ipc ] 50- shmmax [ sysv ipc ]
51- shmmni 51- shmmni
52- stop-a [ SPARC only ] 52- stop-a [ SPARC only ]
53- suid_dumpable
54- sysrq ==> Documentation/sysrq.txt 53- sysrq ==> Documentation/sysrq.txt
55- tainted 54- tainted
56- threads-max 55- threads-max
@@ -310,25 +309,6 @@ kernel. This value defaults to SHMMAX.
310 309
311============================================================== 310==============================================================
312 311
313suid_dumpable:
314
315This value can be used to query and set the core dump mode for setuid
316or otherwise protected/tainted binaries. The modes are
317
3180 - (default) - traditional behaviour. Any process which has changed
319 privilege levels or is execute only will not be dumped
3201 - (debug) - all processes dump core when possible. The core dump is
321 owned by the current user and no security is applied. This is
322 intended for system debugging situations only. Ptrace is unchecked.
3232 - (suidsafe) - any binary which normally would not be dumped is dumped
324 readable by root only. This allows the end user to remove
325 such a dump but not access it directly. For security reasons
326 core dumps in this mode will not overwrite one another or
327 other files. This mode is appropriate when adminstrators are
328 attempting to debug problems in a normal environment.
329
330==============================================================
331
332tainted: 312tainted:
333 313
334Non-zero if the kernel has been tainted. Numeric values, which 314Non-zero if the kernel has been tainted. Numeric values, which
diff --git a/MAINTAINERS b/MAINTAINERS
index e3e1515ba5a..25cd7073a20 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -889,6 +889,12 @@ M: rdunlap@xenotime.net
889T: git http://tali.admingilde.org/git/linux-docbook.git 889T: git http://tali.admingilde.org/git/linux-docbook.git
890S: Maintained 890S: Maintained
891 891
892DOCKING STATION DRIVER
893P: Kristen Carlson Accardi
894M: kristen.c.accardi@intel.com
895L: linux-acpi@vger.kernel.org
896S: Maintained
897
892DOUBLETALK DRIVER 898DOUBLETALK DRIVER
893P: James R. Van Zandt 899P: James R. Van Zandt
894M: jrv@vanzandt.mv.com 900M: jrv@vanzandt.mv.com
@@ -2656,6 +2662,14 @@ M: chrisw@sous-sol.org
2656L: stable@kernel.org 2662L: stable@kernel.org
2657S: Maintained 2663S: Maintained
2658 2664
2665STABLE BRANCH:
2666P: Greg Kroah-Hartman
2667M: greg@kroah.com
2668P: Chris Wright
2669M: chrisw@sous-sol.org
2670L: stable@kernel.org
2671S: Maintained
2672
2659TPM DEVICE DRIVER 2673TPM DEVICE DRIVER
2660P: Kylene Hall 2674P: Kylene Hall
2661M: kjhall@us.ibm.com 2675M: kjhall@us.ibm.com
@@ -3282,10 +3296,11 @@ S: Maintained
3282 3296
3283XFS FILESYSTEM 3297XFS FILESYSTEM
3284P: Silicon Graphics Inc 3298P: Silicon Graphics Inc
3299P: Tim Shimmin, David Chatterton
3285M: xfs-masters@oss.sgi.com 3300M: xfs-masters@oss.sgi.com
3286M: nathans@sgi.com
3287L: xfs@oss.sgi.com 3301L: xfs@oss.sgi.com
3288W: http://oss.sgi.com/projects/xfs 3302W: http://oss.sgi.com/projects/xfs
3303T: git git://oss.sgi.com:8090/xfs/xfs-2.6
3289S: Supported 3304S: Supported
3290 3305
3291X86 3-LEVEL PAGING (PAE) SUPPORT 3306X86 3-LEVEL PAGING (PAE) SUPPORT
diff --git a/Makefile b/Makefile
index c2f78a5a549..33559b56644 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
1VERSION = 2 1VERSION = 2
2PATCHLEVEL = 6 2PATCHLEVEL = 6
3SUBLEVEL = 18 3SUBLEVEL = 18
4EXTRAVERSION = -rc4 4EXTRAVERSION = -rc5
5NAME=Crazed Snow-Weasel 5NAME=Crazed Snow-Weasel
6 6
7# *DOCUMENTATION* 7# *DOCUMENTATION*
@@ -309,9 +309,6 @@ CPPFLAGS := -D__KERNEL__ $(LINUXINCLUDE)
309 309
310CFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \ 310CFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
311 -fno-strict-aliasing -fno-common 311 -fno-strict-aliasing -fno-common
312# Force gcc to behave correct even for buggy distributions
313CFLAGS += $(call cc-option, -fno-stack-protector)
314
315AFLAGS := -D__ASSEMBLY__ 312AFLAGS := -D__ASSEMBLY__
316 313
317# Read KERNELRELEASE from include/config/kernel.release (if it exists) 314# Read KERNELRELEASE from include/config/kernel.release (if it exists)
@@ -436,12 +433,13 @@ core-y := usr/
436endif # KBUILD_EXTMOD 433endif # KBUILD_EXTMOD
437 434
438ifeq ($(dot-config),1) 435ifeq ($(dot-config),1)
439# In this section, we need .config 436# Read in config
437-include include/config/auto.conf
440 438
439ifeq ($(KBUILD_EXTMOD),)
441# Read in dependencies to all Kconfig* files, make sure to run 440# Read in dependencies to all Kconfig* files, make sure to run
442# oldconfig if changes are detected. 441# oldconfig if changes are detected.
443-include include/config/auto.conf.cmd 442-include include/config/auto.conf.cmd
444-include include/config/auto.conf
445 443
446# To avoid any implicit rule to kick in, define an empty command 444# To avoid any implicit rule to kick in, define an empty command
447$(KCONFIG_CONFIG) include/config/auto.conf.cmd: ; 445$(KCONFIG_CONFIG) include/config/auto.conf.cmd: ;
@@ -451,16 +449,27 @@ $(KCONFIG_CONFIG) include/config/auto.conf.cmd: ;
451# if auto.conf.cmd is missing then we are probably in a cleaned tree so 449# if auto.conf.cmd is missing then we are probably in a cleaned tree so
452# we execute the config step to be sure to catch updated Kconfig files 450# we execute the config step to be sure to catch updated Kconfig files
453include/config/auto.conf: $(KCONFIG_CONFIG) include/config/auto.conf.cmd 451include/config/auto.conf: $(KCONFIG_CONFIG) include/config/auto.conf.cmd
454ifeq ($(KBUILD_EXTMOD),)
455 $(Q)$(MAKE) -f $(srctree)/Makefile silentoldconfig 452 $(Q)$(MAKE) -f $(srctree)/Makefile silentoldconfig
456else 453else
457 $(error kernel configuration not valid - run 'make prepare' in $(srctree) to update it) 454# external modules needs include/linux/autoconf.h and include/config/auto.conf
458endif 455# but do not care if they are up-to-date. Use auto.conf to trigger the test
456PHONY += include/config/auto.conf
457
458include/config/auto.conf:
459 $(Q)test -e include/linux/autoconf.h -a -e $@ || ( \
460 echo; \
461 echo " ERROR: Kernel configuration is invalid."; \
462 echo " include/linux/autoconf.h or $@ are missing."; \
463 echo " Run 'make oldconfig && make prepare' on kernel src to fix it."; \
464 echo; \
465 /bin/false)
466
467endif # KBUILD_EXTMOD
459 468
460else 469else
461# Dummy target needed, because used as prerequisite 470# Dummy target needed, because used as prerequisite
462include/config/auto.conf: ; 471include/config/auto.conf: ;
463endif 472endif # $(dot-config)
464 473
465# The all: target is the default when no target is given on the 474# The all: target is the default when no target is given on the
466# command line. 475# command line.
@@ -474,6 +483,8 @@ else
474CFLAGS += -O2 483CFLAGS += -O2
475endif 484endif
476 485
486include $(srctree)/arch/$(ARCH)/Makefile
487
477ifdef CONFIG_FRAME_POINTER 488ifdef CONFIG_FRAME_POINTER
478CFLAGS += -fno-omit-frame-pointer $(call cc-option,-fno-optimize-sibling-calls,) 489CFLAGS += -fno-omit-frame-pointer $(call cc-option,-fno-optimize-sibling-calls,)
479else 490else
@@ -488,7 +499,8 @@ ifdef CONFIG_DEBUG_INFO
488CFLAGS += -g 499CFLAGS += -g
489endif 500endif
490 501
491include $(srctree)/arch/$(ARCH)/Makefile 502# Force gcc to behave correct even for buggy distributions
503CFLAGS += $(call cc-option, -fno-stack-protector)
492 504
493# arch Makefile may override CC so keep this after arch Makefile is included 505# arch Makefile may override CC so keep this after arch Makefile is included
494NOSTDINC_FLAGS += -nostdinc -isystem $(shell $(CC) -print-file-name=include) 506NOSTDINC_FLAGS += -nostdinc -isystem $(shell $(CC) -print-file-name=include)
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 3345c6d0fd1..92873cdee31 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -47,7 +47,8 @@ comma = ,
47# testing for a specific architecture or later rather impossible. 47# testing for a specific architecture or later rather impossible.
48arch-$(CONFIG_CPU_32v6) :=-D__LINUX_ARM_ARCH__=6 $(call cc-option,-march=armv6,-march=armv5t -Wa$(comma)-march=armv6) 48arch-$(CONFIG_CPU_32v6) :=-D__LINUX_ARM_ARCH__=6 $(call cc-option,-march=armv6,-march=armv5t -Wa$(comma)-march=armv6)
49arch-$(CONFIG_CPU_32v6K) :=-D__LINUX_ARM_ARCH__=6 $(call cc-option,-march=armv6k,-march=armv5t -Wa$(comma)-march=armv6k) 49arch-$(CONFIG_CPU_32v6K) :=-D__LINUX_ARM_ARCH__=6 $(call cc-option,-march=armv6k,-march=armv5t -Wa$(comma)-march=armv6k)
50arch-$(CONFIG_CPU_32v5) :=-D__LINUX_ARM_ARCH__=5 $(call cc-option,-march=armv5te,-march=armv4) 50arch-$(CONFIG_CPU_32v5) :=-D__LINUX_ARM_ARCH__=5 $(call cc-option,-march=armv5te,-march=armv4t)
51arch-$(CONFIG_CPU_32v4T) :=-D__LINUX_ARM_ARCH__=4 -march=armv4t
51arch-$(CONFIG_CPU_32v4) :=-D__LINUX_ARM_ARCH__=4 -march=armv4 52arch-$(CONFIG_CPU_32v4) :=-D__LINUX_ARM_ARCH__=4 -march=armv4
52arch-$(CONFIG_CPU_32v3) :=-D__LINUX_ARM_ARCH__=3 -march=armv3 53arch-$(CONFIG_CPU_32v3) :=-D__LINUX_ARM_ARCH__=3 -march=armv3
53 54
diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c
index 5b7c26395b4..028bdc9228f 100644
--- a/arch/arm/common/dmabounce.c
+++ b/arch/arm/common/dmabounce.c
@@ -179,17 +179,19 @@ alloc_safe_buffer(struct dmabounce_device_info *device_info, void *ptr,
179static inline struct safe_buffer * 179static inline struct safe_buffer *
180find_safe_buffer(struct dmabounce_device_info *device_info, dma_addr_t safe_dma_addr) 180find_safe_buffer(struct dmabounce_device_info *device_info, dma_addr_t safe_dma_addr)
181{ 181{
182 struct safe_buffer *b = NULL; 182 struct safe_buffer *b, *rb = NULL;
183 unsigned long flags; 183 unsigned long flags;
184 184
185 read_lock_irqsave(&device_info->lock, flags); 185 read_lock_irqsave(&device_info->lock, flags);
186 186
187 list_for_each_entry(b, &device_info->safe_buffers, node) 187 list_for_each_entry(b, &device_info->safe_buffers, node)
188 if (b->safe_dma_addr == safe_dma_addr) 188 if (b->safe_dma_addr == safe_dma_addr) {
189 rb = b;
189 break; 190 break;
191 }
190 192
191 read_unlock_irqrestore(&device_info->lock, flags); 193 read_unlock_irqrestore(&device_info->lock, flags);
192 return b; 194 return rb;
193} 195}
194 196
195static inline void 197static inline void
diff --git a/arch/arm/common/rtctime.c b/arch/arm/common/rtctime.c
index 35c9a64ac14..4e5445cfb0e 100644
--- a/arch/arm/common/rtctime.c
+++ b/arch/arm/common/rtctime.c
@@ -68,6 +68,7 @@ void rtc_next_alarm_time(struct rtc_time *next, struct rtc_time *now, struct rtc
68 rtc_time_to_tm(next_time, next); 68 rtc_time_to_tm(next_time, next);
69 } 69 }
70} 70}
71EXPORT_SYMBOL(rtc_next_alarm_time);
71 72
72static inline int rtc_arm_read_time(struct rtc_ops *ops, struct rtc_time *tm) 73static inline int rtc_arm_read_time(struct rtc_ops *ops, struct rtc_time *tm)
73{ 74{
diff --git a/arch/arm/common/sa1111.c b/arch/arm/common/sa1111.c
index a331c12cead..29818bd3248 100644
--- a/arch/arm/common/sa1111.c
+++ b/arch/arm/common/sa1111.c
@@ -618,7 +618,7 @@ __sa1111_probe(struct device *me, struct resource *mem, int irq)
618{ 618{
619 struct sa1111 *sachip; 619 struct sa1111 *sachip;
620 unsigned long id; 620 unsigned long id;
621 unsigned int has_devs, val; 621 unsigned int has_devs;
622 int i, ret = -ENODEV; 622 int i, ret = -ENODEV;
623 623
624 sachip = kzalloc(sizeof(struct sa1111), GFP_KERNEL); 624 sachip = kzalloc(sizeof(struct sa1111), GFP_KERNEL);
@@ -669,6 +669,9 @@ __sa1111_probe(struct device *me, struct resource *mem, int irq)
669 sa1111_wake(sachip); 669 sa1111_wake(sachip);
670 670
671#ifdef CONFIG_ARCH_SA1100 671#ifdef CONFIG_ARCH_SA1100
672 {
673 unsigned int val;
674
672 /* 675 /*
673 * The SDRAM configuration of the SA1110 and the SA1111 must 676 * The SDRAM configuration of the SA1110 and the SA1111 must
674 * match. This is very important to ensure that SA1111 accesses 677 * match. This is very important to ensure that SA1111 accesses
@@ -692,6 +695,7 @@ __sa1111_probe(struct device *me, struct resource *mem, int irq)
692 * Enable the SA1110 memory bus request and grant signals. 695 * Enable the SA1110 memory bus request and grant signals.
693 */ 696 */
694 sa1110_mb_enable(); 697 sa1110_mb_enable();
698 }
695#endif 699#endif
696 700
697 /* 701 /*
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index f0c0cdb1c18..1320a0efca7 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -13,12 +13,11 @@ obj-y := compat.o entry-armv.o entry-common.o irq.o \
13obj-$(CONFIG_APM) += apm.o 13obj-$(CONFIG_APM) += apm.o
14obj-$(CONFIG_ISA_DMA_API) += dma.o 14obj-$(CONFIG_ISA_DMA_API) += dma.o
15obj-$(CONFIG_ARCH_ACORN) += ecard.o 15obj-$(CONFIG_ARCH_ACORN) += ecard.o
16obj-$(CONFIG_FOOTBRIDGE) += isa.o
17obj-$(CONFIG_FIQ) += fiq.o 16obj-$(CONFIG_FIQ) += fiq.o
18obj-$(CONFIG_MODULES) += armksyms.o module.o 17obj-$(CONFIG_MODULES) += armksyms.o module.o
19obj-$(CONFIG_ARTHUR) += arthur.o 18obj-$(CONFIG_ARTHUR) += arthur.o
20obj-$(CONFIG_ISA_DMA) += dma-isa.o 19obj-$(CONFIG_ISA_DMA) += dma-isa.o
21obj-$(CONFIG_PCI) += bios32.o 20obj-$(CONFIG_PCI) += bios32.o isa.o
22obj-$(CONFIG_SMP) += smp.o 21obj-$(CONFIG_SMP) += smp.o
23obj-$(CONFIG_OABI_COMPAT) += sys_oabi-compat.o 22obj-$(CONFIG_OABI_COMPAT) += sys_oabi-compat.o
24 23
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 7ea5f01dfc7..de4e3313790 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -634,6 +634,14 @@ ENTRY(__switch_to)
634 * purpose. 634 * purpose.
635 */ 635 */
636 636
637 .macro usr_ret, reg
638#ifdef CONFIG_ARM_THUMB
639 bx \reg
640#else
641 mov pc, \reg
642#endif
643 .endm
644
637 .align 5 645 .align 5
638 .globl __kuser_helper_start 646 .globl __kuser_helper_start
639__kuser_helper_start: 647__kuser_helper_start:
@@ -675,7 +683,7 @@ __kuser_memory_barrier: @ 0xffff0fa0
675#if __LINUX_ARM_ARCH__ >= 6 && defined(CONFIG_SMP) 683#if __LINUX_ARM_ARCH__ >= 6 && defined(CONFIG_SMP)
676 mcr p15, 0, r0, c7, c10, 5 @ dmb 684 mcr p15, 0, r0, c7, c10, 5 @ dmb
677#endif 685#endif
678 mov pc, lr 686 usr_ret lr
679 687
680 .align 5 688 .align 5
681 689
@@ -778,7 +786,7 @@ __kuser_cmpxchg: @ 0xffff0fc0
778 mov r0, #-1 786 mov r0, #-1
779 adds r0, r0, #0 787 adds r0, r0, #0
780#endif 788#endif
781 mov pc, lr 789 usr_ret lr
782 790
783#else 791#else
784 792
@@ -792,7 +800,7 @@ __kuser_cmpxchg: @ 0xffff0fc0
792#ifdef CONFIG_SMP 800#ifdef CONFIG_SMP
793 mcr p15, 0, r0, c7, c10, 5 @ dmb 801 mcr p15, 0, r0, c7, c10, 5 @ dmb
794#endif 802#endif
795 mov pc, lr 803 usr_ret lr
796 804
797#endif 805#endif
798 806
@@ -834,16 +842,11 @@ __kuser_cmpxchg: @ 0xffff0fc0
834__kuser_get_tls: @ 0xffff0fe0 842__kuser_get_tls: @ 0xffff0fe0
835 843
836#if !defined(CONFIG_HAS_TLS_REG) && !defined(CONFIG_TLS_REG_EMUL) 844#if !defined(CONFIG_HAS_TLS_REG) && !defined(CONFIG_TLS_REG_EMUL)
837
838 ldr r0, [pc, #(16 - 8)] @ TLS stored at 0xffff0ff0 845 ldr r0, [pc, #(16 - 8)] @ TLS stored at 0xffff0ff0
839 mov pc, lr
840
841#else 846#else
842
843 mrc p15, 0, r0, c13, c0, 3 @ read TLS register 847 mrc p15, 0, r0, c13, c0, 3 @ read TLS register
844 mov pc, lr
845
846#endif 848#endif
849 usr_ret lr
847 850
848 .rep 5 851 .rep 5
849 .word 0 @ pad up to __kuser_helper_version 852 .word 0 @ pad up to __kuser_helper_version
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 4fe386eea4b..5365d4e5949 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -118,7 +118,7 @@ ENTRY(secondary_startup)
118 sub r4, r4, r5 @ mmu has been enabled 118 sub r4, r4, r5 @ mmu has been enabled
119 ldr r4, [r7, r4] @ get secondary_data.pgdir 119 ldr r4, [r7, r4] @ get secondary_data.pgdir
120 adr lr, __enable_mmu @ return address 120 adr lr, __enable_mmu @ return address
121 add pc, r10, #12 @ initialise processor 121 add pc, r10, #PROCINFO_INITFUNC @ initialise processor
122 @ (return control reg) 122 @ (return control reg)
123 123
124 /* 124 /*
diff --git a/arch/arm/kernel/isa.c b/arch/arm/kernel/isa.c
index 685c3e591a7..54bbd9fe255 100644
--- a/arch/arm/kernel/isa.c
+++ b/arch/arm/kernel/isa.c
@@ -3,21 +3,14 @@
3 * 3 *
4 * Copyright (C) 1999 Phil Blundell 4 * Copyright (C) 1999 Phil Blundell
5 * 5 *
6 * ISA shared memory and I/O port support
7 */
8
9/*
10 * This program is free software; you can redistribute it and/or 6 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License 7 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version 8 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version. 9 * 2 of the License, or (at your option) any later version.
10 *
11 * ISA shared memory and I/O port support, and is required to support
12 * iopl, inb, outb and friends in userspace via glibc emulation.
14 */ 13 */
15
16/*
17 * Nothing about this is actually ARM specific. One day we could move
18 * it into kernel/resource.c or some place like that.
19 */
20
21#include <linux/stddef.h> 14#include <linux/stddef.h>
22#include <linux/types.h> 15#include <linux/types.h>
23#include <linux/fs.h> 16#include <linux/fs.h>
@@ -27,21 +20,49 @@
27static unsigned int isa_membase, isa_portbase, isa_portshift; 20static unsigned int isa_membase, isa_portbase, isa_portshift;
28 21
29static ctl_table ctl_isa_vars[4] = { 22static ctl_table ctl_isa_vars[4] = {
30 {BUS_ISA_MEM_BASE, "membase", &isa_membase, 23 {
31 sizeof(isa_membase), 0444, NULL, &proc_dointvec}, 24 .ctl_name = BUS_ISA_MEM_BASE,
32 {BUS_ISA_PORT_BASE, "portbase", &isa_portbase, 25 .procname = "membase",
33 sizeof(isa_portbase), 0444, NULL, &proc_dointvec}, 26 .data = &isa_membase,
34 {BUS_ISA_PORT_SHIFT, "portshift", &isa_portshift, 27 .maxlen = sizeof(isa_membase),
35 sizeof(isa_portshift), 0444, NULL, &proc_dointvec}, 28 .mode = 0444,
36 {0} 29 .proc_handler = &proc_dointvec,
30 }, {
31 .ctl_name = BUS_ISA_PORT_BASE,
32 .procname = "portbase",
33 .data = &isa_portbase,
34 .maxlen = sizeof(isa_portbase),
35 .mode = 0444,
36 .proc_handler = &proc_dointvec,
37 }, {
38 .ctl_name = BUS_ISA_PORT_SHIFT,
39 .procname = "portshift",
40 .data = &isa_portshift,
41 .maxlen = sizeof(isa_portshift),
42 .mode = 0444,
43 .proc_handler = &proc_dointvec,
44 }, {0}
37}; 45};
38 46
39static struct ctl_table_header *isa_sysctl_header; 47static struct ctl_table_header *isa_sysctl_header;
40 48
41static ctl_table ctl_isa[2] = {{CTL_BUS_ISA, "isa", NULL, 0, 0555, ctl_isa_vars}, 49static ctl_table ctl_isa[2] = {
42 {0}}; 50 {
43static ctl_table ctl_bus[2] = {{CTL_BUS, "bus", NULL, 0, 0555, ctl_isa}, 51 .ctl_name = CTL_BUS_ISA,
44 {0}}; 52 .procname = "isa",
53 .mode = 0555,
54 .child = ctl_isa_vars,
55 }, {0}
56};
57
58static ctl_table ctl_bus[2] = {
59 {
60 .ctl_name = CTL_BUS,
61 .procname = "bus",
62 .mode = 0555,
63 .child = ctl_isa,
64 }, {0}
65};
45 66
46void __init 67void __init
47register_isa_ports(unsigned int membase, unsigned int portbase, unsigned int portshift) 68register_isa_ports(unsigned int membase, unsigned int portbase, unsigned int portshift)
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 4e29dd03e58..aeeed806f99 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -233,7 +233,7 @@ NORET_TYPE void die(const char *str, struct pt_regs *regs, int err)
233 spin_unlock_irq(&die_lock); 233 spin_unlock_irq(&die_lock);
234 234
235 if (panic_on_oops) 235 if (panic_on_oops)
236 panic("Fatal exception: panic_on_oops"); 236 panic("Fatal exception");
237 237
238 do_exit(SIGSEGV); 238 do_exit(SIGSEGV);
239} 239}
diff --git a/arch/arm/mach-footbridge/dc21285.c b/arch/arm/mach-footbridge/dc21285.c
index 607ed1f5b3f..823e25d4547 100644
--- a/arch/arm/mach-footbridge/dc21285.c
+++ b/arch/arm/mach-footbridge/dc21285.c
@@ -35,7 +35,6 @@
35 35
36extern int setup_arm_irq(int, struct irqaction *); 36extern int setup_arm_irq(int, struct irqaction *);
37extern void pcibios_report_status(u_int status_mask, int warn); 37extern void pcibios_report_status(u_int status_mask, int warn);
38extern void register_isa_ports(unsigned int, unsigned int, unsigned int);
39 38
40static unsigned long 39static unsigned long
41dc21285_base_address(struct pci_bus *bus, unsigned int devfn) 40dc21285_base_address(struct pci_bus *bus, unsigned int devfn)
diff --git a/arch/arm/mach-integrator/pci_v3.c b/arch/arm/mach-integrator/pci_v3.c
index f9043592e29..4418f6d7572 100644
--- a/arch/arm/mach-integrator/pci_v3.c
+++ b/arch/arm/mach-integrator/pci_v3.c
@@ -600,4 +600,6 @@ void __init pci_v3_postinit(void)
600 printk(KERN_ERR "PCI: unable to grab local bus timeout " 600 printk(KERN_ERR "PCI: unable to grab local bus timeout "
601 "interrupt: %d\n", ret); 601 "interrupt: %d\n", ret);
602#endif 602#endif
603
604 register_isa_ports(PHYS_PCI_MEM_BASE, PHYS_PCI_IO_BASE, 0);
603} 605}
diff --git a/arch/arm/mach-ixp4xx/common-pci.c b/arch/arm/mach-ixp4xx/common-pci.c
index 2d40fe1145f..9562177b5fe 100644
--- a/arch/arm/mach-ixp4xx/common-pci.c
+++ b/arch/arm/mach-ixp4xx/common-pci.c
@@ -532,8 +532,6 @@ pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask)
532 return -EIO; 532 return -EIO;
533} 533}
534 534
535EXPORT_SYMBOL(pci_set_dma_mask);
536EXPORT_SYMBOL(pci_set_consistent_dma_mask);
537EXPORT_SYMBOL(ixp4xx_pci_read); 535EXPORT_SYMBOL(ixp4xx_pci_read);
538EXPORT_SYMBOL(ixp4xx_pci_write); 536EXPORT_SYMBOL(ixp4xx_pci_write);
539 537
diff --git a/arch/arm/mach-ixp4xx/gtwx5715-setup.c b/arch/arm/mach-ixp4xx/gtwx5715-setup.c
index 654e2eed81f..30f1300e0e2 100644
--- a/arch/arm/mach-ixp4xx/gtwx5715-setup.c
+++ b/arch/arm/mach-ixp4xx/gtwx5715-setup.c
@@ -107,9 +107,9 @@ static struct flash_platform_data gtwx5715_flash_data = {
107 .width = 2, 107 .width = 2,
108}; 108};
109 109
110static struct gtw5715_flash_resource = { 110static struct resource gtwx5715_flash_resource = {
111 .flags = IORESOURCE_MEM, 111 .flags = IORESOURCE_MEM,
112} 112};
113 113
114static struct platform_device gtwx5715_flash = { 114static struct platform_device gtwx5715_flash = {
115 .name = "IXP4XX-Flash", 115 .name = "IXP4XX-Flash",
@@ -130,9 +130,6 @@ static void __init gtwx5715_init(void)
130{ 130{
131 ixp4xx_sys_init(); 131 ixp4xx_sys_init();
132 132
133 if (!flash_resource)
134 printk(KERN_ERR "Could not allocate flash resource\n");
135
136 gtwx5715_flash_resource.start = IXP4XX_EXP_BUS_BASE(0); 133 gtwx5715_flash_resource.start = IXP4XX_EXP_BUS_BASE(0);
137 gtwx5715_flash_resource.end = IXP4XX_EXP_BUS_BASE(0) + SZ_8M - 1; 134 gtwx5715_flash_resource.end = IXP4XX_EXP_BUS_BASE(0) + SZ_8M - 1;
138 135
diff --git a/arch/arm/mach-pxa/corgi_ssp.c b/arch/arm/mach-pxa/corgi_ssp.c
index f9421318cb7..ff6b4ee037f 100644
--- a/arch/arm/mach-pxa/corgi_ssp.c
+++ b/arch/arm/mach-pxa/corgi_ssp.c
@@ -47,14 +47,15 @@ static struct corgissp_machinfo *ssp_machinfo;
47 */ 47 */
48unsigned long corgi_ssp_ads7846_putget(ulong data) 48unsigned long corgi_ssp_ads7846_putget(ulong data)
49{ 49{
50 unsigned long ret,flag; 50 unsigned long flag;
51 u32 ret = 0;
51 52
52 spin_lock_irqsave(&corgi_ssp_lock, flag); 53 spin_lock_irqsave(&corgi_ssp_lock, flag);
53 if (ssp_machinfo->cs_ads7846 >= 0) 54 if (ssp_machinfo->cs_ads7846 >= 0)
54 GPCR(ssp_machinfo->cs_ads7846) = GPIO_bit(ssp_machinfo->cs_ads7846); 55 GPCR(ssp_machinfo->cs_ads7846) = GPIO_bit(ssp_machinfo->cs_ads7846);
55 56
56 ssp_write_word(&corgi_ssp_dev,data); 57 ssp_write_word(&corgi_ssp_dev,data);
57 ret = ssp_read_word(&corgi_ssp_dev); 58 ssp_read_word(&corgi_ssp_dev, &ret);
58 59
59 if (ssp_machinfo->cs_ads7846 >= 0) 60 if (ssp_machinfo->cs_ads7846 >= 0)
60 GPSR(ssp_machinfo->cs_ads7846) = GPIO_bit(ssp_machinfo->cs_ads7846); 61 GPSR(ssp_machinfo->cs_ads7846) = GPIO_bit(ssp_machinfo->cs_ads7846);
@@ -88,7 +89,9 @@ void corgi_ssp_ads7846_put(ulong data)
88 89
89unsigned long corgi_ssp_ads7846_get(void) 90unsigned long corgi_ssp_ads7846_get(void)
90{ 91{
91 return ssp_read_word(&corgi_ssp_dev); 92 u32 ret = 0;
93 ssp_read_word(&corgi_ssp_dev, &ret);
94 return ret;
92} 95}
93 96
94EXPORT_SYMBOL(corgi_ssp_ads7846_putget); 97EXPORT_SYMBOL(corgi_ssp_ads7846_putget);
@@ -104,6 +107,7 @@ EXPORT_SYMBOL(corgi_ssp_ads7846_get);
104unsigned long corgi_ssp_dac_put(ulong data) 107unsigned long corgi_ssp_dac_put(ulong data)
105{ 108{
106 unsigned long flag, sscr1 = SSCR1_SPH; 109 unsigned long flag, sscr1 = SSCR1_SPH;
110 u32 tmp;
107 111
108 spin_lock_irqsave(&corgi_ssp_lock, flag); 112 spin_lock_irqsave(&corgi_ssp_lock, flag);
109 113
@@ -118,7 +122,7 @@ unsigned long corgi_ssp_dac_put(ulong data)
118 GPCR(ssp_machinfo->cs_lcdcon) = GPIO_bit(ssp_machinfo->cs_lcdcon); 122 GPCR(ssp_machinfo->cs_lcdcon) = GPIO_bit(ssp_machinfo->cs_lcdcon);
119 ssp_write_word(&corgi_ssp_dev,data); 123 ssp_write_word(&corgi_ssp_dev,data);
120 /* Read null data back from device to prevent SSP overflow */ 124 /* Read null data back from device to prevent SSP overflow */
121 ssp_read_word(&corgi_ssp_dev); 125 ssp_read_word(&corgi_ssp_dev, &tmp);
122 if (ssp_machinfo->cs_lcdcon >= 0) 126 if (ssp_machinfo->cs_lcdcon >= 0)
123 GPSR(ssp_machinfo->cs_lcdcon) = GPIO_bit(ssp_machinfo->cs_lcdcon); 127 GPSR(ssp_machinfo->cs_lcdcon) = GPIO_bit(ssp_machinfo->cs_lcdcon);
124 128
@@ -150,7 +154,7 @@ EXPORT_SYMBOL(corgi_ssp_blduty_set);
150int corgi_ssp_max1111_get(ulong data) 154int corgi_ssp_max1111_get(ulong data)
151{ 155{
152 unsigned long flag; 156 unsigned long flag;
153 int voltage,voltage1,voltage2; 157 long voltage = 0, voltage1 = 0, voltage2 = 0;
154 158
155 spin_lock_irqsave(&corgi_ssp_lock, flag); 159 spin_lock_irqsave(&corgi_ssp_lock, flag);
156 if (ssp_machinfo->cs_max1111 >= 0) 160 if (ssp_machinfo->cs_max1111 >= 0)
@@ -163,15 +167,15 @@ int corgi_ssp_max1111_get(ulong data)
163 167
164 /* TB1/RB1 */ 168 /* TB1/RB1 */
165 ssp_write_word(&corgi_ssp_dev,data); 169 ssp_write_word(&corgi_ssp_dev,data);
166 ssp_read_word(&corgi_ssp_dev); /* null read */ 170 ssp_read_word(&corgi_ssp_dev, (u32*)&voltage1); /* null read */
167 171
168 /* TB12/RB2 */ 172 /* TB12/RB2 */
169 ssp_write_word(&corgi_ssp_dev,0); 173 ssp_write_word(&corgi_ssp_dev,0);
170 voltage1=ssp_read_word(&corgi_ssp_dev); 174 ssp_read_word(&corgi_ssp_dev, (u32*)&voltage1);
171 175
172 /* TB13/RB3*/ 176 /* TB13/RB3*/
173 ssp_write_word(&corgi_ssp_dev,0); 177 ssp_write_word(&corgi_ssp_dev,0);
174 voltage2=ssp_read_word(&corgi_ssp_dev); 178 ssp_read_word(&corgi_ssp_dev, (u32*)&voltage2);
175 179
176 ssp_disable(&corgi_ssp_dev); 180 ssp_disable(&corgi_ssp_dev);
177 ssp_config(&corgi_ssp_dev, (SSCR0_National | (SSCR0_DSS & 0x0b )), 0, 0, SSCR0_SerClkDiv(ssp_machinfo->clk_ads7846)); 181 ssp_config(&corgi_ssp_dev, (SSCR0_National | (SSCR0_DSS & 0x0b )), 0, 0, SSCR0_SerClkDiv(ssp_machinfo->clk_ads7846));
diff --git a/arch/arm/mach-pxa/ssp.c b/arch/arm/mach-pxa/ssp.c
index 93096befd01..1fddfeaa630 100644
--- a/arch/arm/mach-pxa/ssp.c
+++ b/arch/arm/mach-pxa/ssp.c
@@ -40,6 +40,8 @@
40 40
41#define PXA_SSP_PORTS 3 41#define PXA_SSP_PORTS 3
42 42
43#define TIMEOUT 100000
44
43struct ssp_info_ { 45struct ssp_info_ {
44 int irq; 46 int irq;
45 u32 clock; 47 u32 clock;
@@ -92,13 +94,18 @@ static irqreturn_t ssp_interrupt(int irq, void *dev_id, struct pt_regs *regs)
92 * The caller is expected to perform the necessary locking. 94 * The caller is expected to perform the necessary locking.
93 * 95 *
94 * Returns: 96 * Returns:
95 * %-ETIMEDOUT timeout occurred (for future) 97 * %-ETIMEDOUT timeout occurred
96 * 0 success 98 * 0 success
97 */ 99 */
98int ssp_write_word(struct ssp_dev *dev, u32 data) 100int ssp_write_word(struct ssp_dev *dev, u32 data)
99{ 101{
100 while (!(SSSR_P(dev->port) & SSSR_TNF)) 102 int timeout = TIMEOUT;
103
104 while (!(SSSR_P(dev->port) & SSSR_TNF)) {
105 if (!--timeout)
106 return -ETIMEDOUT;
101 cpu_relax(); 107 cpu_relax();
108 }
102 109
103 SSDR_P(dev->port) = data; 110 SSDR_P(dev->port) = data;
104 111
@@ -117,15 +124,21 @@ int ssp_write_word(struct ssp_dev *dev, u32 data)
117 * The caller is expected to perform the necessary locking. 124 * The caller is expected to perform the necessary locking.
118 * 125 *
119 * Returns: 126 * Returns:
120 * %-ETIMEDOUT timeout occurred (for future) 127 * %-ETIMEDOUT timeout occurred
121 * 32-bit data success 128 * 32-bit data success
122 */ 129 */
123int ssp_read_word(struct ssp_dev *dev) 130int ssp_read_word(struct ssp_dev *dev, u32 *data)
124{ 131{
125 while (!(SSSR_P(dev->port) & SSSR_RNE)) 132 int timeout = TIMEOUT;
133
134 while (!(SSSR_P(dev->port) & SSSR_RNE)) {
135 if (!--timeout)
136 return -ETIMEDOUT;
126 cpu_relax(); 137 cpu_relax();
138 }
127 139
128 return SSDR_P(dev->port); 140 *data = SSDR_P(dev->port);
141 return 0;
129} 142}
130 143
131/** 144/**
@@ -136,13 +149,21 @@ int ssp_read_word(struct ssp_dev *dev)
136 * 149 *
137 * The caller is expected to perform the necessary locking. 150 * The caller is expected to perform the necessary locking.
138 */ 151 */
139void ssp_flush(struct ssp_dev *dev) 152int ssp_flush(struct ssp_dev *dev)
140{ 153{
154 int timeout = TIMEOUT * 2;
155
141 do { 156 do {
142 while (SSSR_P(dev->port) & SSSR_RNE) { 157 while (SSSR_P(dev->port) & SSSR_RNE) {
158 if (!--timeout)
159 return -ETIMEDOUT;
143 (void) SSDR_P(dev->port); 160 (void) SSDR_P(dev->port);
144 } 161 }
162 if (!--timeout)
163 return -ETIMEDOUT;
145 } while (SSSR_P(dev->port) & SSSR_BSY); 164 } while (SSSR_P(dev->port) & SSSR_BSY);
165
166 return 0;
146} 167}
147 168
148/** 169/**
diff --git a/arch/arm/mach-s3c2410/Makefile b/arch/arm/mach-s3c2410/Makefile
index 0c7938645df..273e05f2b8d 100644
--- a/arch/arm/mach-s3c2410/Makefile
+++ b/arch/arm/mach-s3c2410/Makefile
@@ -10,45 +10,47 @@ obj-m :=
10obj-n := 10obj-n :=
11obj- := 11obj- :=
12 12
13# DMA
14obj-$(CONFIG_S3C2410_DMA) += dma.o
15
13# S3C2400 support files 16# S3C2400 support files
14obj-$(CONFIG_CPU_S3C2400) += s3c2400-gpio.o 17obj-$(CONFIG_CPU_S3C2400) += s3c2400-gpio.o
15 18
16# S3C2410 support files 19# S3C2410 support files
17 20
18obj-$(CONFIG_CPU_S3C2410) += s3c2410.o 21obj-$(CONFIG_CPU_S3C2410) += s3c2410.o
19obj-$(CONFIG_CPU_S3C2410) += s3c2410-gpio.o 22obj-$(CONFIG_CPU_S3C2410) += s3c2410-gpio.o
20obj-$(CONFIG_S3C2410_DMA) += dma.o
21 23
22# Power Management support 24# Power Management support
23 25
24obj-$(CONFIG_PM) += pm.o sleep.o 26obj-$(CONFIG_PM) += pm.o sleep.o
25obj-$(CONFIG_PM_SIMTEC) += pm-simtec.o 27obj-$(CONFIG_PM_SIMTEC) += pm-simtec.o
26 28
27# S3C2412 support 29# S3C2412 support
28obj-$(CONFIG_CPU_S3C2412) += s3c2412.o 30obj-$(CONFIG_CPU_S3C2412) += s3c2412.o
29obj-$(CONFIG_CPU_S3C2412) += s3c2412-clock.o 31obj-$(CONFIG_CPU_S3C2412) += s3c2412-clock.o
30 32
31# 33#
32# S3C244X support 34# S3C244X support
33 35
34obj-$(CONFIG_CPU_S3C244X) += s3c244x.o 36obj-$(CONFIG_CPU_S3C244X) += s3c244x.o
35obj-$(CONFIG_CPU_S3C244X) += s3c244x-irq.o 37obj-$(CONFIG_CPU_S3C244X) += s3c244x-irq.o
36 38
37# Clock control 39# Clock control
38 40
39obj-$(CONFIG_S3C2410_CLOCK) += s3c2410-clock.o 41obj-$(CONFIG_S3C2410_CLOCK) += s3c2410-clock.o
40 42
41# S3C2440 support 43# S3C2440 support
42 44
43obj-$(CONFIG_CPU_S3C2440) += s3c2440.o s3c2440-dsc.o 45obj-$(CONFIG_CPU_S3C2440) += s3c2440.o s3c2440-dsc.o
44obj-$(CONFIG_CPU_S3C2440) += s3c2440-irq.o 46obj-$(CONFIG_CPU_S3C2440) += s3c2440-irq.o
45obj-$(CONFIG_CPU_S3C2440) += s3c2440-clock.o 47obj-$(CONFIG_CPU_S3C2440) += s3c2440-clock.o
46obj-$(CONFIG_CPU_S3C2440) += s3c2410-gpio.o 48obj-$(CONFIG_CPU_S3C2440) += s3c2410-gpio.o
47 49
48# S3C2442 support 50# S3C2442 support
49 51
50obj-$(CONFIG_CPU_S3C2442) += s3c2442.o 52obj-$(CONFIG_CPU_S3C2442) += s3c2442.o
51obj-$(CONFIG_CPU_S3C2442) += s3c2442-clock.o 53obj-$(CONFIG_CPU_S3C2442) += s3c2442-clock.o
52 54
53# bast extras 55# bast extras
54 56
diff --git a/arch/arm/mach-s3c2410/dma.c b/arch/arm/mach-s3c2410/dma.c
index 094cc52745c..25855452fe8 100644
--- a/arch/arm/mach-s3c2410/dma.c
+++ b/arch/arm/mach-s3c2410/dma.c
@@ -112,7 +112,7 @@ dmadbg_capture(s3c2410_dma_chan_t *chan, struct s3c2410_dma_regstate *regs)
112} 112}
113 113
114static void 114static void
115dmadbg_showregs(const char *fname, int line, s3c2410_dma_chan_t *chan, 115dmadbg_dumpregs(const char *fname, int line, s3c2410_dma_chan_t *chan,
116 struct s3c2410_dma_regstate *regs) 116 struct s3c2410_dma_regstate *regs)
117{ 117{
118 printk(KERN_DEBUG "dma%d: %s:%d: DCSRC=%08lx, DISRC=%08lx, DSTAT=%08lx DMT=%02lx, DCON=%08lx\n", 118 printk(KERN_DEBUG "dma%d: %s:%d: DCSRC=%08lx, DISRC=%08lx, DSTAT=%08lx DMT=%02lx, DCON=%08lx\n",
@@ -132,7 +132,16 @@ dmadbg_showchan(const char *fname, int line, s3c2410_dma_chan_t *chan)
132 chan->number, fname, line, chan->load_state, 132 chan->number, fname, line, chan->load_state,
133 chan->curr, chan->next, chan->end); 133 chan->curr, chan->next, chan->end);
134 134
135 dmadbg_showregs(fname, line, chan, &state); 135 dmadbg_dumpregs(fname, line, chan, &state);
136}
137
138static void
139dmadbg_showregs(const char *fname, int line, s3c2410_dma_chan_t *chan)
140{
141 struct s3c2410_dma_regstate state;
142
143 dmadbg_capture(chan, &state);
144 dmadbg_dumpregs(fname, line, chan, &state);
136} 145}
137 146
138#define dbg_showregs(chan) dmadbg_showregs(__FUNCTION__, __LINE__, (chan)) 147#define dbg_showregs(chan) dmadbg_showregs(__FUNCTION__, __LINE__, (chan))
@@ -253,10 +262,14 @@ s3c2410_dma_loadbuffer(s3c2410_dma_chan_t *chan,
253 buf->next); 262 buf->next);
254 reload = (buf->next == NULL) ? S3C2410_DCON_NORELOAD : 0; 263 reload = (buf->next == NULL) ? S3C2410_DCON_NORELOAD : 0;
255 } else { 264 } else {
256 pr_debug("load_state is %d => autoreload\n", chan->load_state); 265 //pr_debug("load_state is %d => autoreload\n", chan->load_state);
257 reload = S3C2410_DCON_AUTORELOAD; 266 reload = S3C2410_DCON_AUTORELOAD;
258 } 267 }
259 268
269 if ((buf->data & 0xf0000000) != 0x30000000) {
270 dmawarn("dmaload: buffer is %p\n", (void *)buf->data);
271 }
272
260 writel(buf->data, chan->addr_reg); 273 writel(buf->data, chan->addr_reg);
261 274
262 dma_wrreg(chan, S3C2410_DMA_DCON, 275 dma_wrreg(chan, S3C2410_DMA_DCON,
@@ -370,7 +383,7 @@ static int s3c2410_dma_start(s3c2410_dma_chan_t *chan)
370 tmp |= S3C2410_DMASKTRIG_ON; 383 tmp |= S3C2410_DMASKTRIG_ON;
371 dma_wrreg(chan, S3C2410_DMA_DMASKTRIG, tmp); 384 dma_wrreg(chan, S3C2410_DMA_DMASKTRIG, tmp);
372 385
373 pr_debug("wrote %08lx to DMASKTRIG\n", tmp); 386 pr_debug("dma%d: %08lx to DMASKTRIG\n", chan->number, tmp);
374 387
375#if 0 388#if 0
376 /* the dma buffer loads should take care of clearing the AUTO 389 /* the dma buffer loads should take care of clearing the AUTO
@@ -384,7 +397,30 @@ static int s3c2410_dma_start(s3c2410_dma_chan_t *chan)
384 397
385 dbg_showchan(chan); 398 dbg_showchan(chan);
386 399
 400 /* if we've only loaded one buffer onto the channel, then check
401 * to see if we have another, and if so, try and load it so when
402 * the first buffer is finished, the new one will be loaded onto
403 * the channel */
404
405 if (chan->next != NULL) {
406 if (chan->load_state == S3C2410_DMALOAD_1LOADED) {
407
408 if (s3c2410_dma_waitforload(chan, __LINE__) == 0) {
409 pr_debug("%s: buff not yet loaded, no more todo\n",
410 __FUNCTION__);
411 } else {
412 chan->load_state = S3C2410_DMALOAD_1RUNNING;
413 s3c2410_dma_loadbuffer(chan, chan->next);
414 }
415
416 } else if (chan->load_state == S3C2410_DMALOAD_1RUNNING) {
417 s3c2410_dma_loadbuffer(chan, chan->next);
418 }
419 }
420
421
387 local_irq_restore(flags); 422 local_irq_restore(flags);
423
388 return 0; 424 return 0;
389} 425}
390 426
@@ -436,12 +472,11 @@ int s3c2410_dma_enqueue(unsigned int channel, void *id,
436 buf = kmem_cache_alloc(dma_kmem, GFP_ATOMIC); 472 buf = kmem_cache_alloc(dma_kmem, GFP_ATOMIC);
437 if (buf == NULL) { 473 if (buf == NULL) {
438 pr_debug("%s: out of memory (%ld alloc)\n", 474 pr_debug("%s: out of memory (%ld alloc)\n",
439 __FUNCTION__, sizeof(*buf)); 475 __FUNCTION__, (long)sizeof(*buf));
440 return -ENOMEM; 476 return -ENOMEM;
441 } 477 }
442 478
443 pr_debug("%s: new buffer %p\n", __FUNCTION__, buf); 479 //pr_debug("%s: new buffer %p\n", __FUNCTION__, buf);
444
445 //dbg_showchan(chan); 480 //dbg_showchan(chan);
446 481
447 buf->next = NULL; 482 buf->next = NULL;
@@ -537,14 +572,20 @@ s3c2410_dma_lastxfer(s3c2410_dma_chan_t *chan)
537 case S3C2410_DMALOAD_1LOADED: 572 case S3C2410_DMALOAD_1LOADED:
538 if (s3c2410_dma_waitforload(chan, __LINE__) == 0) { 573 if (s3c2410_dma_waitforload(chan, __LINE__) == 0) {
539 /* flag error? */ 574 /* flag error? */
540 printk(KERN_ERR "dma%d: timeout waiting for load\n", 575 printk(KERN_ERR "dma%d: timeout waiting for load (%s)\n",
541 chan->number); 576 chan->number, __FUNCTION__);
542 return; 577 return;
543 } 578 }
544 break; 579 break;
545 580
581 case S3C2410_DMALOAD_1LOADED_1RUNNING:
 582 /* I believe in this case we do not have anything to do
583 * until the next buffer comes along, and we turn off the
584 * reload */
585 return;
586
546 default: 587 default:
547 pr_debug("dma%d: lastxfer: unhandled load_state %d with no next", 588 pr_debug("dma%d: lastxfer: unhandled load_state %d with no next\n",
548 chan->number, chan->load_state); 589 chan->number, chan->load_state);
549 return; 590 return;
550 591
@@ -629,7 +670,14 @@ s3c2410_dma_irq(int irq, void *devpw, struct pt_regs *regs)
629 } else { 670 } else {
630 } 671 }
631 672
632 if (chan->next != NULL) { 673 /* only reload if the channel is still running... our buffer done
674 * routine may have altered the state by requesting the dma channel
675 * to stop or shutdown... */
676
677 /* todo: check that when the channel is shut-down from inside this
678 * function, we cope with unsetting reload, etc */
679
680 if (chan->next != NULL && chan->state != S3C2410_DMA_IDLE) {
633 unsigned long flags; 681 unsigned long flags;
634 682
635 switch (chan->load_state) { 683 switch (chan->load_state) {
@@ -644,8 +692,8 @@ s3c2410_dma_irq(int irq, void *devpw, struct pt_regs *regs)
644 case S3C2410_DMALOAD_1LOADED: 692 case S3C2410_DMALOAD_1LOADED:
645 if (s3c2410_dma_waitforload(chan, __LINE__) == 0) { 693 if (s3c2410_dma_waitforload(chan, __LINE__) == 0) {
646 /* flag error? */ 694 /* flag error? */
647 printk(KERN_ERR "dma%d: timeout waiting for load\n", 695 printk(KERN_ERR "dma%d: timeout waiting for load (%s)\n",
648 chan->number); 696 chan->number, __FUNCTION__);
649 return IRQ_HANDLED; 697 return IRQ_HANDLED;
650 } 698 }
651 699
@@ -678,8 +726,6 @@ s3c2410_dma_irq(int irq, void *devpw, struct pt_regs *regs)
678 return IRQ_HANDLED; 726 return IRQ_HANDLED;
679} 727}
680 728
681
682
683/* s3c2410_request_dma 729/* s3c2410_request_dma
684 * 730 *
685 * get control of an dma channel 731 * get control of an dma channel
@@ -718,11 +764,17 @@ int s3c2410_dma_request(unsigned int channel, s3c2410_dma_client_t *client,
718 pr_debug("dma%d: %s : requesting irq %d\n", 764 pr_debug("dma%d: %s : requesting irq %d\n",
719 channel, __FUNCTION__, chan->irq); 765 channel, __FUNCTION__, chan->irq);
720 766
767 chan->irq_claimed = 1;
768 local_irq_restore(flags);
769
721 err = request_irq(chan->irq, s3c2410_dma_irq, IRQF_DISABLED, 770 err = request_irq(chan->irq, s3c2410_dma_irq, IRQF_DISABLED,
722 client->name, (void *)chan); 771 client->name, (void *)chan);
723 772
773 local_irq_save(flags);
774
724 if (err) { 775 if (err) {
725 chan->in_use = 0; 776 chan->in_use = 0;
777 chan->irq_claimed = 0;
726 local_irq_restore(flags); 778 local_irq_restore(flags);
727 779
728 printk(KERN_ERR "%s: cannot get IRQ %d for DMA %d\n", 780 printk(KERN_ERR "%s: cannot get IRQ %d for DMA %d\n",
@@ -730,7 +782,6 @@ int s3c2410_dma_request(unsigned int channel, s3c2410_dma_client_t *client,
730 return err; 782 return err;
731 } 783 }
732 784
733 chan->irq_claimed = 1;
734 chan->irq_enabled = 1; 785 chan->irq_enabled = 1;
735 } 786 }
736 787
@@ -810,6 +861,7 @@ static int s3c2410_dma_dostop(s3c2410_dma_chan_t *chan)
810 861
811 tmp = dma_rdreg(chan, S3C2410_DMA_DMASKTRIG); 862 tmp = dma_rdreg(chan, S3C2410_DMA_DMASKTRIG);
812 tmp |= S3C2410_DMASKTRIG_STOP; 863 tmp |= S3C2410_DMASKTRIG_STOP;
864 //tmp &= ~S3C2410_DMASKTRIG_ON;
813 dma_wrreg(chan, S3C2410_DMA_DMASKTRIG, tmp); 865 dma_wrreg(chan, S3C2410_DMA_DMASKTRIG, tmp);
814 866
815#if 0 867#if 0
@@ -819,6 +871,7 @@ static int s3c2410_dma_dostop(s3c2410_dma_chan_t *chan)
819 dma_wrreg(chan, S3C2410_DMA_DCON, tmp); 871 dma_wrreg(chan, S3C2410_DMA_DCON, tmp);
820#endif 872#endif
821 873
 874 /* should the stop path do this, or should we wait for flush? */
822 chan->state = S3C2410_DMA_IDLE; 875 chan->state = S3C2410_DMA_IDLE;
823 chan->load_state = S3C2410_DMALOAD_NONE; 876 chan->load_state = S3C2410_DMALOAD_NONE;
824 877
@@ -827,6 +880,22 @@ static int s3c2410_dma_dostop(s3c2410_dma_chan_t *chan)
827 return 0; 880 return 0;
828} 881}
829 882
883void s3c2410_dma_waitforstop(s3c2410_dma_chan_t *chan)
884{
885 unsigned long tmp;
886 unsigned int timeout = 0x10000;
887
888 while (timeout-- > 0) {
889 tmp = dma_rdreg(chan, S3C2410_DMA_DMASKTRIG);
890
891 if (!(tmp & S3C2410_DMASKTRIG_ON))
892 return;
893 }
894
895 pr_debug("dma%d: failed to stop?\n", chan->number);
896}
897
898
830/* s3c2410_dma_flush 899/* s3c2410_dma_flush
831 * 900 *
832 * stop the channel, and remove all current and pending transfers 901 * stop the channel, and remove all current and pending transfers
@@ -837,7 +906,9 @@ static int s3c2410_dma_flush(s3c2410_dma_chan_t *chan)
837 s3c2410_dma_buf_t *buf, *next; 906 s3c2410_dma_buf_t *buf, *next;
838 unsigned long flags; 907 unsigned long flags;
839 908
840 pr_debug("%s:\n", __FUNCTION__); 909 pr_debug("%s: chan %p (%d)\n", __FUNCTION__, chan, chan->number);
910
911 dbg_showchan(chan);
841 912
842 local_irq_save(flags); 913 local_irq_save(flags);
843 914
@@ -864,11 +935,64 @@ static int s3c2410_dma_flush(s3c2410_dma_chan_t *chan)
864 } 935 }
865 } 936 }
866 937
938 dbg_showregs(chan);
939
940 s3c2410_dma_waitforstop(chan);
941
942#if 0
943 /* should also clear interrupts, according to WinCE BSP */
944 {
945 unsigned long tmp;
946
947 tmp = dma_rdreg(chan, S3C2410_DMA_DCON);
948 tmp |= S3C2410_DCON_NORELOAD;
949 dma_wrreg(chan, S3C2410_DMA_DCON, tmp);
950 }
951#endif
952
953 dbg_showregs(chan);
954
867 local_irq_restore(flags); 955 local_irq_restore(flags);
868 956
869 return 0; 957 return 0;
870} 958}
871 959
960int
961s3c2410_dma_started(s3c2410_dma_chan_t *chan)
962{
963 unsigned long flags;
964
965 local_irq_save(flags);
966
967 dbg_showchan(chan);
968
 969 /* if we've only loaded one buffer onto the channel, then check
970 * to see if we have another, and if so, try and load it so when
971 * the first buffer is finished, the new one will be loaded onto
972 * the channel */
973
974 if (chan->next != NULL) {
975 if (chan->load_state == S3C2410_DMALOAD_1LOADED) {
976
977 if (s3c2410_dma_waitforload(chan, __LINE__) == 0) {
978 pr_debug("%s: buff not yet loaded, no more todo\n",
979 __FUNCTION__);
980 } else {
981 chan->load_state = S3C2410_DMALOAD_1RUNNING;
982 s3c2410_dma_loadbuffer(chan, chan->next);
983 }
984
985 } else if (chan->load_state == S3C2410_DMALOAD_1RUNNING) {
986 s3c2410_dma_loadbuffer(chan, chan->next);
987 }
988 }
989
990
991 local_irq_restore(flags);
992
993 return 0;
994
995}
872 996
873int 997int
874s3c2410_dma_ctrl(dmach_t channel, s3c2410_chan_op_t op) 998s3c2410_dma_ctrl(dmach_t channel, s3c2410_chan_op_t op)
@@ -885,14 +1009,15 @@ s3c2410_dma_ctrl(dmach_t channel, s3c2410_chan_op_t op)
885 return s3c2410_dma_dostop(chan); 1009 return s3c2410_dma_dostop(chan);
886 1010
887 case S3C2410_DMAOP_PAUSE: 1011 case S3C2410_DMAOP_PAUSE:
888 return -ENOENT;
889
890 case S3C2410_DMAOP_RESUME: 1012 case S3C2410_DMAOP_RESUME:
891 return -ENOENT; 1013 return -ENOENT;
892 1014
893 case S3C2410_DMAOP_FLUSH: 1015 case S3C2410_DMAOP_FLUSH:
894 return s3c2410_dma_flush(chan); 1016 return s3c2410_dma_flush(chan);
895 1017
1018 case S3C2410_DMAOP_STARTED:
1019 return s3c2410_dma_started(chan);
1020
896 case S3C2410_DMAOP_TIMEOUT: 1021 case S3C2410_DMAOP_TIMEOUT:
897 return 0; 1022 return 0;
898 1023
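S3C2410_DMAOP_STARTED above gives clients a way to tell the DMA core that the hardware is already running, so s3c2410_dma_started() can pre-load any queued next buffer. A hedged sketch of a caller (the wrapper function and its error reporting are assumptions, not from this patch):

	/*
	 * Illustrative only: after enqueueing buffers and starting the
	 * channel, ask the core to load the next queued buffer early.
	 */
	static void example_dma_kick(dmach_t channel)
	{
		int err;

		err = s3c2410_dma_ctrl(channel, S3C2410_DMAOP_STARTED);
		if (err)
			printk(KERN_ERR "dma: STARTED op failed (%d)\n", err);
	}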
diff --git a/arch/arm/mach-sa1100/ssp.c b/arch/arm/mach-sa1100/ssp.c
index 1604dadf27f..5eba5fbbb56 100644
--- a/arch/arm/mach-sa1100/ssp.c
+++ b/arch/arm/mach-sa1100/ssp.c
@@ -23,6 +23,8 @@
23#include <asm/hardware.h> 23#include <asm/hardware.h>
24#include <asm/hardware/ssp.h> 24#include <asm/hardware/ssp.h>
25 25
26#define TIMEOUT 100000
27
26static irqreturn_t ssp_interrupt(int irq, void *dev_id, struct pt_regs *regs) 28static irqreturn_t ssp_interrupt(int irq, void *dev_id, struct pt_regs *regs)
27{ 29{
28 unsigned int status = Ser4SSSR; 30 unsigned int status = Ser4SSSR;
@@ -47,18 +49,27 @@ static irqreturn_t ssp_interrupt(int irq, void *dev_id, struct pt_regs *regs)
47 * The caller is expected to perform the necessary locking. 49 * The caller is expected to perform the necessary locking.
48 * 50 *
49 * Returns: 51 * Returns:
50 * %-ETIMEDOUT timeout occurred (for future) 52 * %-ETIMEDOUT timeout occurred
51 * 0 success 53 * 0 success
52 */ 54 */
53int ssp_write_word(u16 data) 55int ssp_write_word(u16 data)
54{ 56{
55 while (!(Ser4SSSR & SSSR_TNF)) 57 int timeout = TIMEOUT;
58
59 while (!(Ser4SSSR & SSSR_TNF)) {
60 if (!--timeout)
61 return -ETIMEDOUT;
56 cpu_relax(); 62 cpu_relax();
63 }
57 64
58 Ser4SSDR = data; 65 Ser4SSDR = data;
59 66
60 while (!(Ser4SSSR & SSSR_BSY)) 67 timeout = TIMEOUT;
68 while (!(Ser4SSSR & SSSR_BSY)) {
69 if (!--timeout)
70 return -ETIMEDOUT;
61 cpu_relax(); 71 cpu_relax();
72 }
62 73
63 return 0; 74 return 0;
64} 75}
@@ -75,15 +86,22 @@ int ssp_write_word(u16 data)
75 * The caller is expected to perform the necessary locking. 86 * The caller is expected to perform the necessary locking.
76 * 87 *
77 * Returns: 88 * Returns:
78 * %-ETIMEDOUT timeout occurred (for future) 89 * %-ETIMEDOUT timeout occurred
79 * 16-bit data success 90 * 16-bit data success
80 */ 91 */
81int ssp_read_word(void) 92int ssp_read_word(u16 *data)
82{ 93{
83 while (!(Ser4SSSR & SSSR_RNE)) 94 int timeout = TIMEOUT;
95
96 while (!(Ser4SSSR & SSSR_RNE)) {
97 if (!--timeout)
98 return -ETIMEDOUT;
84 cpu_relax(); 99 cpu_relax();
100 }
101
102 *data = (u16)Ser4SSDR;
85 103
86 return Ser4SSDR; 104 return 0;
87} 105}
88 106
89/** 107/**
@@ -93,14 +111,26 @@ int ssp_read_word(void)
93 * is empty. 111 * is empty.
94 * 112 *
95 * The caller is expected to perform the necessary locking. 113 * The caller is expected to perform the necessary locking.
114 *
115 * Returns:
116 * %-ETIMEDOUT timeout occurred
117 * 0 success
96 */ 118 */
97void ssp_flush(void) 119int ssp_flush(void)
98{ 120{
121 int timeout = TIMEOUT * 2;
122
99 do { 123 do {
100 while (Ser4SSSR & SSSR_RNE) { 124 while (Ser4SSSR & SSSR_RNE) {
125 if (!--timeout)
126 return -ETIMEDOUT;
101 (void) Ser4SSDR; 127 (void) Ser4SSDR;
102 } 128 }
129 if (!--timeout)
130 return -ETIMEDOUT;
103 } while (Ser4SSSR & SSSR_BSY); 131 } while (Ser4SSSR & SSSR_BSY);
132
133 return 0;
104} 134}
105 135
106/** 136/**
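As on PXA, the SA1100 helpers above now poll with a bound and return the received word through a pointer rather than as the return value. A minimal sketch of the new calling convention (the wrapper is illustrative only):

	/*
	 * Illustrative only: write one word, then read the reply, treating
	 * any non-zero return as -ETIMEDOUT from the bounded polls above.
	 */
	static int example_sa1100_xfer(u16 out, u16 *in)
	{
		int ret;

		ret = ssp_write_word(out);
		if (ret)
			return ret;

		return ssp_read_word(in);	/* 0 on success, data in *in */
	}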
diff --git a/arch/arm/mach-versatile/core.c b/arch/arm/mach-versatile/core.c
index c4e3f8c6847..f2bbef07b1e 100644
--- a/arch/arm/mach-versatile/core.c
+++ b/arch/arm/mach-versatile/core.c
@@ -285,7 +285,7 @@ static struct flash_platform_data versatile_flash_data = {
285 285
286static struct resource versatile_flash_resource = { 286static struct resource versatile_flash_resource = {
287 .start = VERSATILE_FLASH_BASE, 287 .start = VERSATILE_FLASH_BASE,
288 .end = VERSATILE_FLASH_BASE + VERSATILE_FLASH_SIZE, 288 .end = VERSATILE_FLASH_BASE + VERSATILE_FLASH_SIZE - 1,
289 .flags = IORESOURCE_MEM, 289 .flags = IORESOURCE_MEM,
290}; 290};
291 291
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 5f80f184cd3..b4f220dd5eb 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -46,7 +46,7 @@ config CPU_ARM710
46config CPU_ARM720T 46config CPU_ARM720T
47 bool "Support ARM720T processor" if !ARCH_CLPS711X && !ARCH_L7200 && !ARCH_CDB89712 && ARCH_INTEGRATOR 47 bool "Support ARM720T processor" if !ARCH_CLPS711X && !ARCH_L7200 && !ARCH_CDB89712 && ARCH_INTEGRATOR
48 default y if ARCH_CLPS711X || ARCH_L7200 || ARCH_CDB89712 || ARCH_H720X 48 default y if ARCH_CLPS711X || ARCH_L7200 || ARCH_CDB89712 || ARCH_H720X
49 select CPU_32v4 49 select CPU_32v4T
50 select CPU_ABRT_LV4T 50 select CPU_ABRT_LV4T
51 select CPU_CACHE_V4 51 select CPU_CACHE_V4
52 select CPU_CACHE_VIVT 52 select CPU_CACHE_VIVT
@@ -64,7 +64,7 @@ config CPU_ARM920T
64 bool "Support ARM920T processor" 64 bool "Support ARM920T processor"
65 depends on ARCH_EP93XX || ARCH_INTEGRATOR || CPU_S3C2410 || CPU_S3C2440 || CPU_S3C2442 || ARCH_IMX || ARCH_AAEC2000 || ARCH_AT91RM9200 65 depends on ARCH_EP93XX || ARCH_INTEGRATOR || CPU_S3C2410 || CPU_S3C2440 || CPU_S3C2442 || ARCH_IMX || ARCH_AAEC2000 || ARCH_AT91RM9200
66 default y if CPU_S3C2410 || CPU_S3C2440 || CPU_S3C2442 || ARCH_AT91RM9200 66 default y if CPU_S3C2410 || CPU_S3C2440 || CPU_S3C2442 || ARCH_AT91RM9200
67 select CPU_32v4 67 select CPU_32v4T
68 select CPU_ABRT_EV4T 68 select CPU_ABRT_EV4T
69 select CPU_CACHE_V4WT 69 select CPU_CACHE_V4WT
70 select CPU_CACHE_VIVT 70 select CPU_CACHE_VIVT
@@ -85,7 +85,7 @@ config CPU_ARM922T
85 bool "Support ARM922T processor" if ARCH_INTEGRATOR 85 bool "Support ARM922T processor" if ARCH_INTEGRATOR
86 depends on ARCH_LH7A40X || ARCH_INTEGRATOR 86 depends on ARCH_LH7A40X || ARCH_INTEGRATOR
87 default y if ARCH_LH7A40X 87 default y if ARCH_LH7A40X
88 select CPU_32v4 88 select CPU_32v4T
89 select CPU_ABRT_EV4T 89 select CPU_ABRT_EV4T
90 select CPU_CACHE_V4WT 90 select CPU_CACHE_V4WT
91 select CPU_CACHE_VIVT 91 select CPU_CACHE_VIVT
@@ -104,7 +104,7 @@ config CPU_ARM925T
104 bool "Support ARM925T processor" if ARCH_OMAP1 104 bool "Support ARM925T processor" if ARCH_OMAP1
105 depends on ARCH_OMAP15XX 105 depends on ARCH_OMAP15XX
106 default y if ARCH_OMAP15XX 106 default y if ARCH_OMAP15XX
107 select CPU_32v4 107 select CPU_32v4T
108 select CPU_ABRT_EV4T 108 select CPU_ABRT_EV4T
109 select CPU_CACHE_V4WT 109 select CPU_CACHE_V4WT
110 select CPU_CACHE_VIVT 110 select CPU_CACHE_VIVT
@@ -285,6 +285,11 @@ config CPU_32v4
285 select TLS_REG_EMUL if SMP || !MMU 285 select TLS_REG_EMUL if SMP || !MMU
286 select NEEDS_SYSCALL_FOR_CMPXCHG if SMP 286 select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
287 287
288config CPU_32v4T
289 bool
290 select TLS_REG_EMUL if SMP || !MMU
291 select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
292
288config CPU_32v5 293config CPU_32v5
289 bool 294 bool
290 select TLS_REG_EMUL if SMP || !MMU 295 select TLS_REG_EMUL if SMP || !MMU
diff --git a/arch/arm/vfp/vfp.h b/arch/arm/vfp/vfp.h
index 4b97950984e..5fbdf81a8aa 100644
--- a/arch/arm/vfp/vfp.h
+++ b/arch/arm/vfp/vfp.h
@@ -353,3 +353,11 @@ u32 vfp_estimate_sqrt_significand(u32 exponent, u32 significand);
353 * A special flag to tell the normalisation code not to normalise. 353 * A special flag to tell the normalisation code not to normalise.
354 */ 354 */
355#define VFP_NAN_FLAG 0x100 355#define VFP_NAN_FLAG 0x100
356
357/*
358 * A bit pattern used to indicate the initial (unset) value of the
359 * exception mask, in case nothing handles an instruction. This
 360 * doesn't include the NAN flag, which gets masked out before
361 * we check for an error.
362 */
363#define VFP_EXCEPTION_ERROR ((u32)-1 & ~VFP_NAN_FLAG)
diff --git a/arch/arm/vfp/vfpdouble.c b/arch/arm/vfp/vfpdouble.c
index 009038c8113..04bd3425b29 100644
--- a/arch/arm/vfp/vfpdouble.c
+++ b/arch/arm/vfp/vfpdouble.c
@@ -465,7 +465,7 @@ static u32 vfp_double_fcvts(int sd, int unused, int dm, u32 fpscr)
465 */ 465 */
466 if (tm & (VFP_INFINITY|VFP_NAN)) { 466 if (tm & (VFP_INFINITY|VFP_NAN)) {
467 vsd.exponent = 255; 467 vsd.exponent = 255;
468 if (tm & VFP_NAN) 468 if (tm == VFP_QNAN)
469 vsd.significand |= VFP_SINGLE_SIGNIFICAND_QNAN; 469 vsd.significand |= VFP_SINGLE_SIGNIFICAND_QNAN;
470 goto pack_nan; 470 goto pack_nan;
471 } else if (tm & VFP_ZERO) 471 } else if (tm & VFP_ZERO)
@@ -1127,7 +1127,7 @@ u32 vfp_double_cpdo(u32 inst, u32 fpscr)
1127{ 1127{
1128 u32 op = inst & FOP_MASK; 1128 u32 op = inst & FOP_MASK;
1129 u32 exceptions = 0; 1129 u32 exceptions = 0;
1130 unsigned int dd = vfp_get_dd(inst); 1130 unsigned int dest;
1131 unsigned int dn = vfp_get_dn(inst); 1131 unsigned int dn = vfp_get_dn(inst);
1132 unsigned int dm = vfp_get_dm(inst); 1132 unsigned int dm = vfp_get_dm(inst);
1133 unsigned int vecitr, veclen, vecstride; 1133 unsigned int vecitr, veclen, vecstride;
@@ -1137,10 +1137,20 @@ u32 vfp_double_cpdo(u32 inst, u32 fpscr)
1137 vecstride = (1 + ((fpscr & FPSCR_STRIDE_MASK) == FPSCR_STRIDE_MASK)) * 2; 1137 vecstride = (1 + ((fpscr & FPSCR_STRIDE_MASK) == FPSCR_STRIDE_MASK)) * 2;
1138 1138
1139 /* 1139 /*
1140 * fcvtds takes an sN register number as destination, not dN.
1141 * It also always operates on scalars.
1142 */
1143 if ((inst & FEXT_MASK) == FEXT_FCVT) {
1144 veclen = 0;
1145 dest = vfp_get_sd(inst);
1146 } else
1147 dest = vfp_get_dd(inst);
1148
1149 /*
1140 * If destination bank is zero, vector length is always '1'. 1150 * If destination bank is zero, vector length is always '1'.
1141 * ARM DDI0100F C5.1.3, C5.3.2. 1151 * ARM DDI0100F C5.1.3, C5.3.2.
1142 */ 1152 */
1143 if (FREG_BANK(dd) == 0) 1153 if (FREG_BANK(dest) == 0)
1144 veclen = 0; 1154 veclen = 0;
1145 1155
1146 pr_debug("VFP: vecstride=%u veclen=%u\n", vecstride, 1156 pr_debug("VFP: vecstride=%u veclen=%u\n", vecstride,
@@ -1153,16 +1163,20 @@ u32 vfp_double_cpdo(u32 inst, u32 fpscr)
1153 for (vecitr = 0; vecitr <= veclen; vecitr += 1 << FPSCR_LENGTH_BIT) { 1163 for (vecitr = 0; vecitr <= veclen; vecitr += 1 << FPSCR_LENGTH_BIT) {
1154 u32 except; 1164 u32 except;
1155 1165
1156 if (op == FOP_EXT) 1166 if (op == FOP_EXT && (inst & FEXT_MASK) == FEXT_FCVT)
1167 pr_debug("VFP: itr%d (s%u) = op[%u] (d%u)\n",
1168 vecitr >> FPSCR_LENGTH_BIT,
1169 dest, dn, dm);
1170 else if (op == FOP_EXT)
1157 pr_debug("VFP: itr%d (d%u) = op[%u] (d%u)\n", 1171 pr_debug("VFP: itr%d (d%u) = op[%u] (d%u)\n",
1158 vecitr >> FPSCR_LENGTH_BIT, 1172 vecitr >> FPSCR_LENGTH_BIT,
1159 dd, dn, dm); 1173 dest, dn, dm);
1160 else 1174 else
1161 pr_debug("VFP: itr%d (d%u) = (d%u) op[%u] (d%u)\n", 1175 pr_debug("VFP: itr%d (d%u) = (d%u) op[%u] (d%u)\n",
1162 vecitr >> FPSCR_LENGTH_BIT, 1176 vecitr >> FPSCR_LENGTH_BIT,
1163 dd, dn, FOP_TO_IDX(op), dm); 1177 dest, dn, FOP_TO_IDX(op), dm);
1164 1178
1165 except = fop(dd, dn, dm, fpscr); 1179 except = fop(dest, dn, dm, fpscr);
1166 pr_debug("VFP: itr%d: exceptions=%08x\n", 1180 pr_debug("VFP: itr%d: exceptions=%08x\n",
1167 vecitr >> FPSCR_LENGTH_BIT, except); 1181 vecitr >> FPSCR_LENGTH_BIT, except);
1168 1182
@@ -1180,7 +1194,7 @@ u32 vfp_double_cpdo(u32 inst, u32 fpscr)
1180 * we encounter an exception. We continue. 1194 * we encounter an exception. We continue.
1181 */ 1195 */
1182 1196
1183 dd = FREG_BANK(dd) + ((FREG_IDX(dd) + vecstride) & 6); 1197 dest = FREG_BANK(dest) + ((FREG_IDX(dest) + vecstride) & 6);
1184 dn = FREG_BANK(dn) + ((FREG_IDX(dn) + vecstride) & 6); 1198 dn = FREG_BANK(dn) + ((FREG_IDX(dn) + vecstride) & 6);
1185 if (FREG_BANK(dm) != 0) 1199 if (FREG_BANK(dm) != 0)
1186 dm = FREG_BANK(dm) + ((FREG_IDX(dm) + vecstride) & 6); 1200 dm = FREG_BANK(dm) + ((FREG_IDX(dm) + vecstride) & 6);
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
index 9d265d5e748..4178f6cc3d3 100644
--- a/arch/arm/vfp/vfpmodule.c
+++ b/arch/arm/vfp/vfpmodule.c
@@ -131,7 +131,7 @@ static void vfp_raise_exceptions(u32 exceptions, u32 inst, u32 fpscr, struct pt_
131 131
132 pr_debug("VFP: raising exceptions %08x\n", exceptions); 132 pr_debug("VFP: raising exceptions %08x\n", exceptions);
133 133
134 if (exceptions == (u32)-1) { 134 if (exceptions == VFP_EXCEPTION_ERROR) {
135 vfp_panic("unhandled bounce"); 135 vfp_panic("unhandled bounce");
136 vfp_raise_sigfpe(0, regs); 136 vfp_raise_sigfpe(0, regs);
137 return; 137 return;
@@ -170,7 +170,7 @@ static void vfp_raise_exceptions(u32 exceptions, u32 inst, u32 fpscr, struct pt_
170 */ 170 */
171static u32 vfp_emulate_instruction(u32 inst, u32 fpscr, struct pt_regs *regs) 171static u32 vfp_emulate_instruction(u32 inst, u32 fpscr, struct pt_regs *regs)
172{ 172{
173 u32 exceptions = (u32)-1; 173 u32 exceptions = VFP_EXCEPTION_ERROR;
174 174
175 pr_debug("VFP: emulate: INST=0x%08x SCR=0x%08x\n", inst, fpscr); 175 pr_debug("VFP: emulate: INST=0x%08x SCR=0x%08x\n", inst, fpscr);
176 176
diff --git a/arch/arm/vfp/vfpsingle.c b/arch/arm/vfp/vfpsingle.c
index dae2c2f4605..78d7cac5f36 100644
--- a/arch/arm/vfp/vfpsingle.c
+++ b/arch/arm/vfp/vfpsingle.c
@@ -506,7 +506,7 @@ static u32 vfp_single_fcvtd(int dd, int unused, s32 m, u32 fpscr)
506 */ 506 */
507 if (tm & (VFP_INFINITY|VFP_NAN)) { 507 if (tm & (VFP_INFINITY|VFP_NAN)) {
508 vdd.exponent = 2047; 508 vdd.exponent = 2047;
509 if (tm & VFP_NAN) 509 if (tm == VFP_QNAN)
510 vdd.significand |= VFP_DOUBLE_SIGNIFICAND_QNAN; 510 vdd.significand |= VFP_DOUBLE_SIGNIFICAND_QNAN;
511 goto pack_nan; 511 goto pack_nan;
512 } else if (tm & VFP_ZERO) 512 } else if (tm & VFP_ZERO)
@@ -514,10 +514,6 @@ static u32 vfp_single_fcvtd(int dd, int unused, s32 m, u32 fpscr)
514 else 514 else
515 vdd.exponent = vsm.exponent + (1023 - 127); 515 vdd.exponent = vsm.exponent + (1023 - 127);
516 516
517 /*
518 * Technically, if bit 0 of dd is set, this is an invalid
519 * instruction. However, we ignore this for efficiency.
520 */
521 return vfp_double_normaliseround(dd, &vdd, fpscr, exceptions, "fcvtd"); 517 return vfp_double_normaliseround(dd, &vdd, fpscr, exceptions, "fcvtd");
522 518
523 pack_nan: 519 pack_nan:
@@ -1174,7 +1170,7 @@ u32 vfp_single_cpdo(u32 inst, u32 fpscr)
1174{ 1170{
1175 u32 op = inst & FOP_MASK; 1171 u32 op = inst & FOP_MASK;
1176 u32 exceptions = 0; 1172 u32 exceptions = 0;
1177 unsigned int sd = vfp_get_sd(inst); 1173 unsigned int dest;
1178 unsigned int sn = vfp_get_sn(inst); 1174 unsigned int sn = vfp_get_sn(inst);
1179 unsigned int sm = vfp_get_sm(inst); 1175 unsigned int sm = vfp_get_sm(inst);
1180 unsigned int vecitr, veclen, vecstride; 1176 unsigned int vecitr, veclen, vecstride;
@@ -1184,10 +1180,22 @@ u32 vfp_single_cpdo(u32 inst, u32 fpscr)
1184 vecstride = 1 + ((fpscr & FPSCR_STRIDE_MASK) == FPSCR_STRIDE_MASK); 1180 vecstride = 1 + ((fpscr & FPSCR_STRIDE_MASK) == FPSCR_STRIDE_MASK);
1185 1181
1186 /* 1182 /*
1183 * fcvtsd takes a dN register number as destination, not sN.
1184 * Technically, if bit 0 of dd is set, this is an invalid
1185 * instruction. However, we ignore this for efficiency.
1186 * It also only operates on scalars.
1187 */
1188 if ((inst & FEXT_MASK) == FEXT_FCVT) {
1189 veclen = 0;
1190 dest = vfp_get_dd(inst);
1191 } else
1192 dest = vfp_get_sd(inst);
1193
1194 /*
1187 * If destination bank is zero, vector length is always '1'. 1195 * If destination bank is zero, vector length is always '1'.
1188 * ARM DDI0100F C5.1.3, C5.3.2. 1196 * ARM DDI0100F C5.1.3, C5.3.2.
1189 */ 1197 */
1190 if (FREG_BANK(sd) == 0) 1198 if (FREG_BANK(dest) == 0)
1191 veclen = 0; 1199 veclen = 0;
1192 1200
1193 pr_debug("VFP: vecstride=%u veclen=%u\n", vecstride, 1201 pr_debug("VFP: vecstride=%u veclen=%u\n", vecstride,
@@ -1201,15 +1209,18 @@ u32 vfp_single_cpdo(u32 inst, u32 fpscr)
1201 s32 m = vfp_get_float(sm); 1209 s32 m = vfp_get_float(sm);
1202 u32 except; 1210 u32 except;
1203 1211
1204 if (op == FOP_EXT) 1212 if (op == FOP_EXT && (inst & FEXT_MASK) == FEXT_FCVT)
1213 pr_debug("VFP: itr%d (d%u) = op[%u] (s%u=%08x)\n",
1214 vecitr >> FPSCR_LENGTH_BIT, dest, sn, sm, m);
1215 else if (op == FOP_EXT)
1205 pr_debug("VFP: itr%d (s%u) = op[%u] (s%u=%08x)\n", 1216 pr_debug("VFP: itr%d (s%u) = op[%u] (s%u=%08x)\n",
1206 vecitr >> FPSCR_LENGTH_BIT, sd, sn, sm, m); 1217 vecitr >> FPSCR_LENGTH_BIT, dest, sn, sm, m);
1207 else 1218 else
1208 pr_debug("VFP: itr%d (s%u) = (s%u) op[%u] (s%u=%08x)\n", 1219 pr_debug("VFP: itr%d (s%u) = (s%u) op[%u] (s%u=%08x)\n",
1209 vecitr >> FPSCR_LENGTH_BIT, sd, sn, 1220 vecitr >> FPSCR_LENGTH_BIT, dest, sn,
1210 FOP_TO_IDX(op), sm, m); 1221 FOP_TO_IDX(op), sm, m);
1211 1222
1212 except = fop(sd, sn, m, fpscr); 1223 except = fop(dest, sn, m, fpscr);
1213 pr_debug("VFP: itr%d: exceptions=%08x\n", 1224 pr_debug("VFP: itr%d: exceptions=%08x\n",
1214 vecitr >> FPSCR_LENGTH_BIT, except); 1225 vecitr >> FPSCR_LENGTH_BIT, except);
1215 1226
@@ -1227,7 +1238,7 @@ u32 vfp_single_cpdo(u32 inst, u32 fpscr)
1227 * we encounter an exception. We continue. 1238 * we encounter an exception. We continue.
1228 */ 1239 */
1229 1240
1230 sd = FREG_BANK(sd) + ((FREG_IDX(sd) + vecstride) & 7); 1241 dest = FREG_BANK(dest) + ((FREG_IDX(dest) + vecstride) & 7);
1231 sn = FREG_BANK(sn) + ((FREG_IDX(sn) + vecstride) & 7); 1242 sn = FREG_BANK(sn) + ((FREG_IDX(sn) + vecstride) & 7);
1232 if (FREG_BANK(sm) != 0) 1243 if (FREG_BANK(sm) != 0)
1233 sm = FREG_BANK(sm) + ((FREG_IDX(sm) + vecstride) & 7); 1244 sm = FREG_BANK(sm) + ((FREG_IDX(sm) + vecstride) & 7);
diff --git a/arch/i386/Kconfig b/arch/i386/Kconfig
index f71fb4a029c..b2751eadbc5 100644
--- a/arch/i386/Kconfig
+++ b/arch/i386/Kconfig
@@ -142,6 +142,7 @@ config X86_SUMMIT
142 In particular, it is needed for the x440. 142 In particular, it is needed for the x440.
143 143
144 If you don't have one of these computers, you should say N here. 144 If you don't have one of these computers, you should say N here.
145 If you want to build a NUMA kernel, you must select ACPI.
145 146
146config X86_BIGSMP 147config X86_BIGSMP
147 bool "Support for other sub-arch SMP systems with more than 8 CPUs" 148 bool "Support for other sub-arch SMP systems with more than 8 CPUs"
@@ -169,6 +170,7 @@ config X86_GENERICARCH
169 help 170 help
170 This option compiles in the Summit, bigsmp, ES7000, default subarchitectures. 171 This option compiles in the Summit, bigsmp, ES7000, default subarchitectures.
171 It is intended for a generic binary kernel. 172 It is intended for a generic binary kernel.
173 If you want a NUMA kernel, select ACPI. We need SRAT for NUMA.
172 174
173config X86_ES7000 175config X86_ES7000
174 bool "Support for Unisys ES7000 IA32 series" 176 bool "Support for Unisys ES7000 IA32 series"
@@ -542,7 +544,7 @@ config X86_PAE
542# Common NUMA Features 544# Common NUMA Features
543config NUMA 545config NUMA
544 bool "Numa Memory Allocation and Scheduler Support" 546 bool "Numa Memory Allocation and Scheduler Support"
545 depends on SMP && HIGHMEM64G && (X86_NUMAQ || X86_GENERICARCH || (X86_SUMMIT && ACPI)) 547 depends on SMP && HIGHMEM64G && (X86_NUMAQ || (X86_SUMMIT || X86_GENERICARCH) && ACPI)
546 default n if X86_PC 548 default n if X86_PC
547 default y if (X86_NUMAQ || X86_SUMMIT) 549 default y if (X86_NUMAQ || X86_SUMMIT)
548 550
diff --git a/arch/i386/kernel/acpi/boot.c b/arch/i386/kernel/acpi/boot.c
index 0db6387025c..ee003bc0e8b 100644
--- a/arch/i386/kernel/acpi/boot.c
+++ b/arch/i386/kernel/acpi/boot.c
@@ -59,7 +59,7 @@ static inline int gsi_irq_sharing(int gsi) { return gsi; }
59 59
60#define BAD_MADT_ENTRY(entry, end) ( \ 60#define BAD_MADT_ENTRY(entry, end) ( \
61 (!entry) || (unsigned long)entry + sizeof(*entry) > end || \ 61 (!entry) || (unsigned long)entry + sizeof(*entry) > end || \
62 ((acpi_table_entry_header *)entry)->length != sizeof(*entry)) 62 ((acpi_table_entry_header *)entry)->length < sizeof(*entry))
63 63
64#define PREFIX "ACPI: " 64#define PREFIX "ACPI: "
65 65
diff --git a/arch/i386/kernel/acpi/wakeup.S b/arch/i386/kernel/acpi/wakeup.S
index 9f408eee4e6..b781b38131c 100644
--- a/arch/i386/kernel/acpi/wakeup.S
+++ b/arch/i386/kernel/acpi/wakeup.S
@@ -292,7 +292,10 @@ ENTRY(do_suspend_lowlevel)
292 pushl $3 292 pushl $3
293 call acpi_enter_sleep_state 293 call acpi_enter_sleep_state
294 addl $4, %esp 294 addl $4, %esp
295 ret 295
296# In case of S3 failure, we'll emerge here. Jump
297# to ret_point to recover
298 jmp ret_point
296 .p2align 4,,7 299 .p2align 4,,7
297ret_point: 300ret_point:
298 call restore_registers 301 call restore_registers
diff --git a/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
index efb41e81351..e6ea00edcb5 100644
--- a/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -567,16 +567,11 @@ static struct cpufreq_driver acpi_cpufreq_driver = {
567static int __init 567static int __init
568acpi_cpufreq_init (void) 568acpi_cpufreq_init (void)
569{ 569{
570 int result = 0;
571
572 dprintk("acpi_cpufreq_init\n"); 570 dprintk("acpi_cpufreq_init\n");
573 571
574 result = acpi_cpufreq_early_init_acpi(); 572 acpi_cpufreq_early_init_acpi();
575 573
576 if (!result) 574 return cpufreq_register_driver(&acpi_cpufreq_driver);
577 result = cpufreq_register_driver(&acpi_cpufreq_driver);
578
579 return (result);
580} 575}
581 576
582 577
diff --git a/arch/i386/kernel/head.S b/arch/i386/kernel/head.S
index eb79aa2fa8b..a6b8bd89aa2 100644
--- a/arch/i386/kernel/head.S
+++ b/arch/i386/kernel/head.S
@@ -317,20 +317,14 @@ is386: movl $2,%ecx # set MP
317 movl %eax,%gs 317 movl %eax,%gs
318 lldt %ax 318 lldt %ax
319 cld # gcc2 wants the direction flag cleared at all times 319 cld # gcc2 wants the direction flag cleared at all times
320 pushl %eax # fake return address
320#ifdef CONFIG_SMP 321#ifdef CONFIG_SMP
321 movb ready, %cl 322 movb ready, %cl
322 movb $1, ready 323 movb $1, ready
323 cmpb $0,%cl 324 cmpb $0,%cl # the first CPU calls start_kernel
324 je 1f # the first CPU calls start_kernel 325 jne initialize_secondary # all other CPUs call initialize_secondary
325 # all other CPUs call initialize_secondary
326 call initialize_secondary
327 jmp L6
3281:
329#endif /* CONFIG_SMP */ 326#endif /* CONFIG_SMP */
330 call start_kernel 327 jmp start_kernel
331L6:
332 jmp L6 # main should never return here, but
333 # just in case, we know what happens.
334 328
335/* 329/*
336 * We depend on ET to be correct. This checks for 287/387. 330 * We depend on ET to be correct. This checks for 287/387.
diff --git a/arch/i386/kernel/irq.c b/arch/i386/kernel/irq.c
index 6cb529f60dc..5fe547cd8f9 100644
--- a/arch/i386/kernel/irq.c
+++ b/arch/i386/kernel/irq.c
@@ -82,10 +82,6 @@ fastcall unsigned int do_IRQ(struct pt_regs *regs)
82 } 82 }
83#endif 83#endif
84 84
85 if (!irq_desc[irq].handle_irq) {
86 __do_IRQ(irq, regs);
87 goto out_exit;
88 }
89#ifdef CONFIG_4KSTACKS 85#ifdef CONFIG_4KSTACKS
90 86
91 curctx = (union irq_ctx *) current_thread_info(); 87 curctx = (union irq_ctx *) current_thread_info();
@@ -125,7 +121,6 @@ fastcall unsigned int do_IRQ(struct pt_regs *regs)
125#endif 121#endif
126 __do_IRQ(irq, regs); 122 __do_IRQ(irq, regs);
127 123
128out_exit:
129 irq_exit(); 124 irq_exit();
130 125
131 return 1; 126 return 1;
diff --git a/arch/i386/kernel/setup.c b/arch/i386/kernel/setup.c
index f1682206d30..345ffb7d904 100644
--- a/arch/i386/kernel/setup.c
+++ b/arch/i386/kernel/setup.c
@@ -956,38 +956,6 @@ efi_memory_present_wrapper(unsigned long start, unsigned long end, void *arg)
956 return 0; 956 return 0;
957} 957}
958 958
959 /*
960 * This function checks if the entire range <start,end> is mapped with type.
961 *
962 * Note: this function only works correct if the e820 table is sorted and
963 * not-overlapping, which is the case
964 */
965int __init
966e820_all_mapped(unsigned long s, unsigned long e, unsigned type)
967{
968 u64 start = s;
969 u64 end = e;
970 int i;
971 for (i = 0; i < e820.nr_map; i++) {
972 struct e820entry *ei = &e820.map[i];
973 if (type && ei->type != type)
974 continue;
975 /* is the region (part) in overlap with the current region ?*/
976 if (ei->addr >= end || ei->addr + ei->size <= start)
977 continue;
978 /* if the region is at the beginning of <start,end> we move
979 * start to the end of the region since it's ok until there
980 */
981 if (ei->addr <= start)
982 start = ei->addr + ei->size;
983 /* if start is now at or beyond end, we're done, full
984 * coverage */
985 if (start >= end)
986 return 1; /* we're done */
987 }
988 return 0;
989}
990
991/* 959/*
992 * Find the highest page frame number we have available 960 * Find the highest page frame number we have available
993 */ 961 */
diff --git a/arch/i386/kernel/traps.c b/arch/i386/kernel/traps.c
index 0d4005dc06c..7e9edafffd8 100644
--- a/arch/i386/kernel/traps.c
+++ b/arch/i386/kernel/traps.c
@@ -92,7 +92,11 @@ asmlinkage void spurious_interrupt_bug(void);
92asmlinkage void machine_check(void); 92asmlinkage void machine_check(void);
93 93
94static int kstack_depth_to_print = 24; 94static int kstack_depth_to_print = 24;
95#ifdef CONFIG_STACK_UNWIND
95static int call_trace = 1; 96static int call_trace = 1;
97#else
98#define call_trace (-1)
99#endif
96ATOMIC_NOTIFIER_HEAD(i386die_chain); 100ATOMIC_NOTIFIER_HEAD(i386die_chain);
97 101
98int register_die_notifier(struct notifier_block *nb) 102int register_die_notifier(struct notifier_block *nb)
@@ -187,22 +191,21 @@ static void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
187 if (unwind_init_blocked(&info, task) == 0) 191 if (unwind_init_blocked(&info, task) == 0)
188 unw_ret = show_trace_unwind(&info, log_lvl); 192 unw_ret = show_trace_unwind(&info, log_lvl);
189 } 193 }
190 if (unw_ret > 0 && !arch_unw_user_mode(&info)) { 194 if (unw_ret > 0) {
191#ifdef CONFIG_STACK_UNWIND 195 if (call_trace == 1 && !arch_unw_user_mode(&info)) {
192 print_symbol("DWARF2 unwinder stuck at %s\n", 196 print_symbol("DWARF2 unwinder stuck at %s\n",
193 UNW_PC(&info)); 197 UNW_PC(&info));
194 if (call_trace == 1) { 198 if (UNW_SP(&info) >= PAGE_OFFSET) {
195 printk("Leftover inexact backtrace:\n"); 199 printk("Leftover inexact backtrace:\n");
196 if (UNW_SP(&info))
197 stack = (void *)UNW_SP(&info); 200 stack = (void *)UNW_SP(&info);
198 } else if (call_trace > 1) 201 } else
202 printk("Full inexact backtrace again:\n");
203 } else if (call_trace >= 1)
199 return; 204 return;
200 else 205 else
201 printk("Full inexact backtrace again:\n"); 206 printk("Full inexact backtrace again:\n");
202#else 207 } else
203 printk("Inexact backtrace:\n"); 208 printk("Inexact backtrace:\n");
204#endif
205 }
206 } 209 }
207 210
208 if (task == current) { 211 if (task == current) {
@@ -454,7 +457,7 @@ void die(const char * str, struct pt_regs * regs, long err)
454 panic("Fatal exception in interrupt"); 457 panic("Fatal exception in interrupt");
455 458
456 if (panic_on_oops) 459 if (panic_on_oops)
457 panic("Fatal exception: panic_on_oops"); 460 panic("Fatal exception");
458 461
459 oops_exit(); 462 oops_exit();
460 do_exit(SIGSEGV); 463 do_exit(SIGSEGV);
@@ -1241,6 +1244,7 @@ static int __init kstack_setup(char *s)
1241} 1244}
1242__setup("kstack=", kstack_setup); 1245__setup("kstack=", kstack_setup);
1243 1246
1247#ifdef CONFIG_STACK_UNWIND
1244static int __init call_trace_setup(char *s) 1248static int __init call_trace_setup(char *s)
1245{ 1249{
1246 if (strcmp(s, "old") == 0) 1250 if (strcmp(s, "old") == 0)
@@ -1254,3 +1258,4 @@ static int __init call_trace_setup(char *s)
1254 return 1; 1258 return 1;
1255} 1259}
1256__setup("call_trace=", call_trace_setup); 1260__setup("call_trace=", call_trace_setup);
1261#endif
diff --git a/arch/i386/pci/common.c b/arch/i386/pci/common.c
index 0a362e3aeac..1220dd828ce 100644
--- a/arch/i386/pci/common.c
+++ b/arch/i386/pci/common.c
@@ -237,6 +237,11 @@ char * __devinit pcibios_setup(char *str)
237 pci_probe &= ~PCI_PROBE_MMCONF; 237 pci_probe &= ~PCI_PROBE_MMCONF;
238 return NULL; 238 return NULL;
239 } 239 }
240 /* override DMI blacklist */
241 else if (!strcmp(str, "mmconf")) {
242 pci_probe |= PCI_PROBE_MMCONF_FORCE;
243 return NULL;
244 }
240#endif 245#endif
241 else if (!strcmp(str, "noacpi")) { 246 else if (!strcmp(str, "noacpi")) {
242 acpi_noirq_set(); 247 acpi_noirq_set();
diff --git a/arch/i386/pci/init.c b/arch/i386/pci/init.c
index c7650a7e0b0..51087a9d917 100644
--- a/arch/i386/pci/init.c
+++ b/arch/i386/pci/init.c
@@ -14,8 +14,12 @@ static __init int pci_access_init(void)
14#ifdef CONFIG_PCI_BIOS 14#ifdef CONFIG_PCI_BIOS
15 pci_pcbios_init(); 15 pci_pcbios_init();
16#endif 16#endif
17 if (raw_pci_ops) 17 /*
18 return 0; 18 * don't check for raw_pci_ops here because we want pcbios as last
19 * fallback, yet it's needed to run first to set pcibios_last_bus
 20 * in case legacy PCI probing is used. Otherwise detecting peer busses
21 * fails.
22 */
19#ifdef CONFIG_PCI_DIRECT 23#ifdef CONFIG_PCI_DIRECT
20 pci_direct_init(); 24 pci_direct_init();
21#endif 25#endif
diff --git a/arch/i386/pci/mmconfig.c b/arch/i386/pci/mmconfig.c
index e545b0992c4..ef5a2faa7d8 100644
--- a/arch/i386/pci/mmconfig.c
+++ b/arch/i386/pci/mmconfig.c
@@ -12,6 +12,7 @@
12#include <linux/pci.h> 12#include <linux/pci.h>
13#include <linux/init.h> 13#include <linux/init.h>
14#include <linux/acpi.h> 14#include <linux/acpi.h>
15#include <linux/dmi.h>
15#include <asm/e820.h> 16#include <asm/e820.h>
16#include "pci.h" 17#include "pci.h"
17 18
@@ -178,7 +179,7 @@ static __init void unreachable_devices(void)
178 pci_exp_set_dev_base(addr, k, PCI_DEVFN(i, 0)); 179 pci_exp_set_dev_base(addr, k, PCI_DEVFN(i, 0));
179 if (addr == 0 || 180 if (addr == 0 ||
180 readl((u32 __iomem *)mmcfg_virt_addr) != val1) { 181 readl((u32 __iomem *)mmcfg_virt_addr) != val1) {
181 set_bit(i, fallback_slots); 182 set_bit(i + 32*k, fallback_slots);
182 printk(KERN_NOTICE 183 printk(KERN_NOTICE
183 "PCI: No mmconfig possible on %x:%x\n", k, i); 184 "PCI: No mmconfig possible on %x:%x\n", k, i);
184 } 185 }
@@ -187,9 +188,31 @@ static __init void unreachable_devices(void)
187 } 188 }
188} 189}
189 190
191static int disable_mcfg(struct dmi_system_id *d)
192{
193 printk("PCI: %s detected. Disabling MCFG.\n", d->ident);
194 pci_probe &= ~PCI_PROBE_MMCONF;
195 return 0;
196}
197
198static struct dmi_system_id __initdata dmi_bad_mcfg[] = {
199 /* Has broken MCFG table that makes the system hang when used */
200 {
201 .callback = disable_mcfg,
202 .ident = "Intel D3C5105 SDV",
203 .matches = {
204 DMI_MATCH(DMI_BIOS_VENDOR, "Intel"),
205 DMI_MATCH(DMI_BOARD_NAME, "D26928"),
206 },
207 },
208 {}
209};
210
190void __init pci_mmcfg_init(void) 211void __init pci_mmcfg_init(void)
191{ 212{
192 if ((pci_probe & PCI_PROBE_MMCONF) == 0) 213 dmi_check_system(dmi_bad_mcfg);
214
215 if ((pci_probe & (PCI_PROBE_MMCONF_FORCE|PCI_PROBE_MMCONF)) == 0)
193 return; 216 return;
194 217
195 acpi_table_parse(ACPI_MCFG, acpi_parse_mcfg); 218 acpi_table_parse(ACPI_MCFG, acpi_parse_mcfg);
@@ -198,15 +221,6 @@ void __init pci_mmcfg_init(void)
198 (pci_mmcfg_config[0].base_address == 0)) 221 (pci_mmcfg_config[0].base_address == 0))
199 return; 222 return;
200 223
201 if (!e820_all_mapped(pci_mmcfg_config[0].base_address,
202 pci_mmcfg_config[0].base_address + MMCONFIG_APER_MIN,
203 E820_RESERVED)) {
204 printk(KERN_ERR "PCI: BIOS Bug: MCFG area at %x is not E820-reserved\n",
205 pci_mmcfg_config[0].base_address);
206 printk(KERN_ERR "PCI: Not using MMCONFIG.\n");
207 return;
208 }
209
210 printk(KERN_INFO "PCI: Using MMCONFIG\n"); 224 printk(KERN_INFO "PCI: Using MMCONFIG\n");
211 raw_pci_ops = &pci_mmcfg; 225 raw_pci_ops = &pci_mmcfg;
212 pci_probe = (pci_probe & ~PCI_PROBE_MASK) | PCI_PROBE_MMCONF; 226 pci_probe = (pci_probe & ~PCI_PROBE_MASK) | PCI_PROBE_MMCONF;
diff --git a/arch/i386/pci/pci.h b/arch/i386/pci/pci.h
index bf4e7933538..49a849b3a24 100644
--- a/arch/i386/pci/pci.h
+++ b/arch/i386/pci/pci.h
@@ -16,7 +16,8 @@
16#define PCI_PROBE_CONF1 0x0002 16#define PCI_PROBE_CONF1 0x0002
17#define PCI_PROBE_CONF2 0x0004 17#define PCI_PROBE_CONF2 0x0004
18#define PCI_PROBE_MMCONF 0x0008 18#define PCI_PROBE_MMCONF 0x0008
19#define PCI_PROBE_MASK 0x000f 19#define PCI_PROBE_MMCONF_FORCE 0x0010
20#define PCI_PROBE_MASK 0x00ff
20 21
21#define PCI_NO_SORT 0x0100 22#define PCI_NO_SORT 0x0100
22#define PCI_BIOS_SORT 0x0200 23#define PCI_BIOS_SORT 0x0200
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 47de9ee6bcd..674de894347 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -258,7 +258,7 @@ config NR_CPUS
258 int "Maximum number of CPUs (2-1024)" 258 int "Maximum number of CPUs (2-1024)"
259 range 2 1024 259 range 2 1024
260 depends on SMP 260 depends on SMP
261 default "64" 261 default "1024"
262 help 262 help
263 You should set this to the number of CPUs in your system, but 263 You should set this to the number of CPUs in your system, but
264 keep in mind that a kernel compiled for, e.g., 2 CPUs will boot but 264 keep in mind that a kernel compiled for, e.g., 2 CPUs will boot but
@@ -354,7 +354,7 @@ config NUMA
354config NODES_SHIFT 354config NODES_SHIFT
355 int "Max num nodes shift(3-10)" 355 int "Max num nodes shift(3-10)"
356 range 3 10 356 range 3 10
357 default "8" 357 default "10"
358 depends on NEED_MULTIPLE_NODES 358 depends on NEED_MULTIPLE_NODES
359 help 359 help
360 This option specifies the maximum number of nodes in your SSI system. 360 This option specifies the maximum number of nodes in your SSI system.
diff --git a/arch/ia64/hp/sim/simscsi.c b/arch/ia64/hp/sim/simscsi.c
index 8a4f0d0d17a..8f0a16a79a6 100644
--- a/arch/ia64/hp/sim/simscsi.c
+++ b/arch/ia64/hp/sim/simscsi.c
@@ -244,7 +244,8 @@ static void simscsi_fillresult(struct scsi_cmnd *sc, char *buf, unsigned len)
244 244
245 if (scatterlen == 0) 245 if (scatterlen == 0)
246 memcpy(sc->request_buffer, buf, len); 246 memcpy(sc->request_buffer, buf, len);
247 else for (slp = (struct scatterlist *)sc->request_buffer; scatterlen-- > 0 && len > 0; slp++) { 247 else for (slp = (struct scatterlist *)sc->request_buffer;
248 scatterlen-- > 0 && len > 0; slp++) {
248 unsigned thislen = min(len, slp->length); 249 unsigned thislen = min(len, slp->length);
249 250
250 memcpy(page_address(slp->page) + slp->offset, buf, thislen); 251 memcpy(page_address(slp->page) + slp->offset, buf, thislen);
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index 99761b81db4..0176556aeec 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -55,7 +55,7 @@
55 55
56#define BAD_MADT_ENTRY(entry, end) ( \ 56#define BAD_MADT_ENTRY(entry, end) ( \
57 (!entry) || (unsigned long)entry + sizeof(*entry) > end || \ 57 (!entry) || (unsigned long)entry + sizeof(*entry) > end || \
58 ((acpi_table_entry_header *)entry)->length != sizeof(*entry)) 58 ((acpi_table_entry_header *)entry)->length < sizeof(*entry))
59 59
60#define PREFIX "ACPI: " 60#define PREFIX "ACPI: "
61 61
diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c
index d24fa393b18..f648c610b10 100644
--- a/arch/ia64/kernel/topology.c
+++ b/arch/ia64/kernel/topology.c
@@ -67,10 +67,8 @@ static int __init topology_init(void)
67#endif 67#endif
68 68
69 sysfs_cpus = kzalloc(sizeof(struct ia64_cpu) * NR_CPUS, GFP_KERNEL); 69 sysfs_cpus = kzalloc(sizeof(struct ia64_cpu) * NR_CPUS, GFP_KERNEL);
70 if (!sysfs_cpus) { 70 if (!sysfs_cpus)
71 err = -ENOMEM; 71 panic("kzalloc in topology_init failed - NR_CPUS too big?");
72 goto out;
73 }
74 72
75 for_each_present_cpu(i) { 73 for_each_present_cpu(i) {
76 if((err = arch_register_cpu(i))) 74 if((err = arch_register_cpu(i)))
diff --git a/arch/ia64/kernel/traps.c b/arch/ia64/kernel/traps.c
index 5a0420464c6..fffa9e0826b 100644
--- a/arch/ia64/kernel/traps.c
+++ b/arch/ia64/kernel/traps.c
@@ -118,7 +118,7 @@ die (const char *str, struct pt_regs *regs, long err)
118 spin_unlock_irq(&die.lock); 118 spin_unlock_irq(&die.lock);
119 119
120 if (panic_on_oops) 120 if (panic_on_oops)
121 panic("Fatal exception: panic_on_oops"); 121 panic("Fatal exception");
122 122
123 do_exit(SIGSEGV); 123 do_exit(SIGSEGV);
124} 124}
diff --git a/arch/ia64/sn/kernel/xpc_channel.c b/arch/ia64/sn/kernel/xpc_channel.c
index c2f69f7942a..1f3540826e6 100644
--- a/arch/ia64/sn/kernel/xpc_channel.c
+++ b/arch/ia64/sn/kernel/xpc_channel.c
@@ -279,8 +279,8 @@ xpc_pull_remote_cachelines(struct xpc_partition *part, void *dst,
279 return part->reason; 279 return part->reason;
280 } 280 }
281 281
282 bte_ret = xp_bte_copy((u64) src, (u64) ia64_tpa((u64) dst), 282 bte_ret = xp_bte_copy((u64) src, (u64) dst, (u64) cnt,
283 (u64) cnt, (BTE_NORMAL | BTE_WACQUIRE), NULL); 283 (BTE_NORMAL | BTE_WACQUIRE), NULL);
284 if (bte_ret == BTE_SUCCESS) { 284 if (bte_ret == BTE_SUCCESS) {
285 return xpcSuccess; 285 return xpcSuccess;
286 } 286 }
diff --git a/arch/ia64/sn/kernel/xpc_main.c b/arch/ia64/sn/kernel/xpc_main.c
index 5e8e59efb34..4d026f9dd98 100644
--- a/arch/ia64/sn/kernel/xpc_main.c
+++ b/arch/ia64/sn/kernel/xpc_main.c
@@ -1052,6 +1052,8 @@ xpc_do_exit(enum xpc_retval reason)
1052 if (xpc_sysctl) { 1052 if (xpc_sysctl) {
1053 unregister_sysctl_table(xpc_sysctl); 1053 unregister_sysctl_table(xpc_sysctl);
1054 } 1054 }
1055
1056 kfree(xpc_remote_copy_buffer_base);
1055} 1057}
1056 1058
1057 1059
@@ -1212,24 +1214,20 @@ xpc_init(void)
1212 partid_t partid; 1214 partid_t partid;
1213 struct xpc_partition *part; 1215 struct xpc_partition *part;
1214 pid_t pid; 1216 pid_t pid;
1217 size_t buf_size;
1215 1218
1216 1219
1217 if (!ia64_platform_is("sn2")) { 1220 if (!ia64_platform_is("sn2")) {
1218 return -ENODEV; 1221 return -ENODEV;
1219 } 1222 }
1220 1223
1221 /* 1224
1222 * xpc_remote_copy_buffer is used as a temporary buffer for bte_copy'ng 1225 buf_size = max(XPC_RP_VARS_SIZE,
1223 * various portions of a partition's reserved page. Its size is based 1226 XPC_RP_HEADER_SIZE + XP_NASID_MASK_BYTES);
1224 * on the size of the reserved page header and part_nasids mask. So we 1227 xpc_remote_copy_buffer = xpc_kmalloc_cacheline_aligned(buf_size,
1225 * need to ensure that the other items will fit as well. 1228 GFP_KERNEL, &xpc_remote_copy_buffer_base);
1226 */ 1229 if (xpc_remote_copy_buffer == NULL)
1227 if (XPC_RP_VARS_SIZE > XPC_RP_HEADER_SIZE + XP_NASID_MASK_BYTES) { 1230 return -ENOMEM;
1228 dev_err(xpc_part, "xpc_remote_copy_buffer is not big enough\n");
1229 return -EPERM;
1230 }
1231 DBUG_ON((u64) xpc_remote_copy_buffer !=
1232 L1_CACHE_ALIGN((u64) xpc_remote_copy_buffer));
1233 1231
1234 snprintf(xpc_part->bus_id, BUS_ID_SIZE, "part"); 1232 snprintf(xpc_part->bus_id, BUS_ID_SIZE, "part");
1235 snprintf(xpc_chan->bus_id, BUS_ID_SIZE, "chan"); 1233 snprintf(xpc_chan->bus_id, BUS_ID_SIZE, "chan");
@@ -1293,6 +1291,8 @@ xpc_init(void)
1293 if (xpc_sysctl) { 1291 if (xpc_sysctl) {
1294 unregister_sysctl_table(xpc_sysctl); 1292 unregister_sysctl_table(xpc_sysctl);
1295 } 1293 }
1294
1295 kfree(xpc_remote_copy_buffer_base);
1296 return -EBUSY; 1296 return -EBUSY;
1297 } 1297 }
1298 1298
@@ -1311,6 +1311,8 @@ xpc_init(void)
1311 if (xpc_sysctl) { 1311 if (xpc_sysctl) {
1312 unregister_sysctl_table(xpc_sysctl); 1312 unregister_sysctl_table(xpc_sysctl);
1313 } 1313 }
1314
1315 kfree(xpc_remote_copy_buffer_base);
1314 return -EBUSY; 1316 return -EBUSY;
1315 } 1317 }
1316 1318
@@ -1362,6 +1364,8 @@ xpc_init(void)
1362 if (xpc_sysctl) { 1364 if (xpc_sysctl) {
1363 unregister_sysctl_table(xpc_sysctl); 1365 unregister_sysctl_table(xpc_sysctl);
1364 } 1366 }
1367
1368 kfree(xpc_remote_copy_buffer_base);
1365 return -EBUSY; 1369 return -EBUSY;
1366 } 1370 }
1367 1371
diff --git a/arch/ia64/sn/kernel/xpc_partition.c b/arch/ia64/sn/kernel/xpc_partition.c
index 2a89cfce495..57c723f5cba 100644
--- a/arch/ia64/sn/kernel/xpc_partition.c
+++ b/arch/ia64/sn/kernel/xpc_partition.c
@@ -71,19 +71,15 @@ struct xpc_partition xpc_partitions[XP_MAX_PARTITIONS + 1];
71 * Generic buffer used to store a local copy of portions of a remote 71 * Generic buffer used to store a local copy of portions of a remote
72 * partition's reserved page (either its header and part_nasids mask, 72 * partition's reserved page (either its header and part_nasids mask,
73 * or its vars). 73 * or its vars).
74 *
75 * xpc_discovery runs only once and is a seperate thread that is
76 * very likely going to be processing in parallel with receiving
77 * interrupts.
78 */ 74 */
79char ____cacheline_aligned xpc_remote_copy_buffer[XPC_RP_HEADER_SIZE + 75char *xpc_remote_copy_buffer;
80 XP_NASID_MASK_BYTES]; 76void *xpc_remote_copy_buffer_base;
81 77
82 78
83/* 79/*
84 * Guarantee that the kmalloc'd memory is cacheline aligned. 80 * Guarantee that the kmalloc'd memory is cacheline aligned.
85 */ 81 */
86static void * 82void *
87xpc_kmalloc_cacheline_aligned(size_t size, gfp_t flags, void **base) 83xpc_kmalloc_cacheline_aligned(size_t size, gfp_t flags, void **base)
88{ 84{
89 /* see if kmalloc will give us cachline aligned memory by default */ 85 /* see if kmalloc will give us cachline aligned memory by default */
@@ -148,7 +144,7 @@ xpc_get_rsvd_page_pa(int nasid)
148 } 144 }
149 } 145 }
150 146
151 bte_res = xp_bte_copy(rp_pa, ia64_tpa(buf), buf_len, 147 bte_res = xp_bte_copy(rp_pa, buf, buf_len,
152 (BTE_NOTIFY | BTE_WACQUIRE), NULL); 148 (BTE_NOTIFY | BTE_WACQUIRE), NULL);
153 if (bte_res != BTE_SUCCESS) { 149 if (bte_res != BTE_SUCCESS) {
154 dev_dbg(xpc_part, "xp_bte_copy failed %i\n", bte_res); 150 dev_dbg(xpc_part, "xp_bte_copy failed %i\n", bte_res);
@@ -447,7 +443,7 @@ xpc_check_remote_hb(void)
447 443
448 /* pull the remote_hb cache line */ 444 /* pull the remote_hb cache line */
449 bres = xp_bte_copy(part->remote_vars_pa, 445 bres = xp_bte_copy(part->remote_vars_pa,
450 ia64_tpa((u64) remote_vars), 446 (u64) remote_vars,
451 XPC_RP_VARS_SIZE, 447 XPC_RP_VARS_SIZE,
452 (BTE_NOTIFY | BTE_WACQUIRE), NULL); 448 (BTE_NOTIFY | BTE_WACQUIRE), NULL);
453 if (bres != BTE_SUCCESS) { 449 if (bres != BTE_SUCCESS) {
@@ -498,8 +494,7 @@ xpc_get_remote_rp(int nasid, u64 *discovered_nasids,
498 494
499 495
500 /* pull over the reserved page header and part_nasids mask */ 496 /* pull over the reserved page header and part_nasids mask */
501 497 bres = xp_bte_copy(*remote_rp_pa, (u64) remote_rp,
502 bres = xp_bte_copy(*remote_rp_pa, ia64_tpa((u64) remote_rp),
503 XPC_RP_HEADER_SIZE + xp_nasid_mask_bytes, 498 XPC_RP_HEADER_SIZE + xp_nasid_mask_bytes,
504 (BTE_NOTIFY | BTE_WACQUIRE), NULL); 499 (BTE_NOTIFY | BTE_WACQUIRE), NULL);
505 if (bres != BTE_SUCCESS) { 500 if (bres != BTE_SUCCESS) {
@@ -554,11 +549,8 @@ xpc_get_remote_vars(u64 remote_vars_pa, struct xpc_vars *remote_vars)
554 return xpcVarsNotSet; 549 return xpcVarsNotSet;
555 } 550 }
556 551
557
558 /* pull over the cross partition variables */ 552 /* pull over the cross partition variables */
559 553 bres = xp_bte_copy(remote_vars_pa, (u64) remote_vars, XPC_RP_VARS_SIZE,
560 bres = xp_bte_copy(remote_vars_pa, ia64_tpa((u64) remote_vars),
561 XPC_RP_VARS_SIZE,
562 (BTE_NOTIFY | BTE_WACQUIRE), NULL); 554 (BTE_NOTIFY | BTE_WACQUIRE), NULL);
563 if (bres != BTE_SUCCESS) { 555 if (bres != BTE_SUCCESS) {
564 return xpc_map_bte_errors(bres); 556 return xpc_map_bte_errors(bres);
@@ -1239,7 +1231,7 @@ xpc_initiate_partid_to_nasids(partid_t partid, void *nasid_mask)
1239 1231
1240 part_nasid_pa = (u64) XPC_RP_PART_NASIDS(part->remote_rp_pa); 1232 part_nasid_pa = (u64) XPC_RP_PART_NASIDS(part->remote_rp_pa);
1241 1233
1242 bte_res = xp_bte_copy(part_nasid_pa, ia64_tpa((u64) nasid_mask), 1234 bte_res = xp_bte_copy(part_nasid_pa, (u64) nasid_mask,
1243 xp_nasid_mask_bytes, (BTE_NOTIFY | BTE_WACQUIRE), NULL); 1235 xp_nasid_mask_bytes, (BTE_NOTIFY | BTE_WACQUIRE), NULL);
1244 1236
1245 return xpc_map_bte_errors(bte_res); 1237 return xpc_map_bte_errors(bte_res);
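The reserved-page copy buffer is now allocated at run time with xpc_kmalloc_cacheline_aligned(), which returns the aligned pointer and hands the raw kmalloc() pointer back through *base so it can be freed on exit. A hedged sketch of that allocate/free pattern (the size and the use of the buffer are placeholders):

	/*
	 * Illustrative only: keep the underlying allocation so kfree() is
	 * given the original pointer, not the cacheline-aligned one.
	 */
	static int example_aligned_alloc(void)
	{
		void *base;
		char *buf;

		buf = xpc_kmalloc_cacheline_aligned(1024, GFP_KERNEL, &base);
		if (buf == NULL)
			return -ENOMEM;

		/* ... use buf as a bte_copy destination ... */

		kfree(base);
		return 0;
	}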
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 78414afb0c5..904798fd4e7 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -354,6 +354,7 @@ endchoice
354config PPC_PSERIES 354config PPC_PSERIES
355 depends on PPC_MULTIPLATFORM && PPC64 355 depends on PPC_MULTIPLATFORM && PPC64
356 bool "IBM pSeries & new (POWER5-based) iSeries" 356 bool "IBM pSeries & new (POWER5-based) iSeries"
357 select MPIC
357 select PPC_I8259 358 select PPC_I8259
358 select PPC_RTAS 359 select PPC_RTAS
359 select RTAS_ERROR_LOGGING 360 select RTAS_ERROR_LOGGING
@@ -363,6 +364,7 @@ config PPC_PSERIES
363config PPC_CHRP 364config PPC_CHRP
364 bool "Common Hardware Reference Platform (CHRP) based machines" 365 bool "Common Hardware Reference Platform (CHRP) based machines"
365 depends on PPC_MULTIPLATFORM && PPC32 366 depends on PPC_MULTIPLATFORM && PPC32
367 select MPIC
366 select PPC_I8259 368 select PPC_I8259
367 select PPC_INDIRECT_PCI 369 select PPC_INDIRECT_PCI
368 select PPC_RTAS 370 select PPC_RTAS
@@ -373,6 +375,7 @@ config PPC_CHRP
373config PPC_PMAC 375config PPC_PMAC
374 bool "Apple PowerMac based machines" 376 bool "Apple PowerMac based machines"
375 depends on PPC_MULTIPLATFORM 377 depends on PPC_MULTIPLATFORM
378 select MPIC
376 select PPC_INDIRECT_PCI if PPC32 379 select PPC_INDIRECT_PCI if PPC32
377 select PPC_MPC106 if PPC32 380 select PPC_MPC106 if PPC32
378 default y 381 default y
@@ -380,6 +383,7 @@ config PPC_PMAC
380config PPC_PMAC64 383config PPC_PMAC64
381 bool 384 bool
382 depends on PPC_PMAC && POWER4 385 depends on PPC_PMAC && POWER4
386 select MPIC
383 select U3_DART 387 select U3_DART
384 select MPIC_BROKEN_U3 388 select MPIC_BROKEN_U3
385 select GENERIC_TBSYNC 389 select GENERIC_TBSYNC
@@ -389,6 +393,7 @@ config PPC_PMAC64
389config PPC_PREP 393config PPC_PREP
390 bool "PowerPC Reference Platform (PReP) based machines" 394 bool "PowerPC Reference Platform (PReP) based machines"
391 depends on PPC_MULTIPLATFORM && PPC32 && BROKEN 395 depends on PPC_MULTIPLATFORM && PPC32 && BROKEN
396 select MPIC
392 select PPC_I8259 397 select PPC_I8259
393 select PPC_INDIRECT_PCI 398 select PPC_INDIRECT_PCI
394 select PPC_UDBG_16550 399 select PPC_UDBG_16550
@@ -397,6 +402,7 @@ config PPC_PREP
397config PPC_MAPLE 402config PPC_MAPLE
398 depends on PPC_MULTIPLATFORM && PPC64 403 depends on PPC_MULTIPLATFORM && PPC64
399 bool "Maple 970FX Evaluation Board" 404 bool "Maple 970FX Evaluation Board"
405 select MPIC
400 select U3_DART 406 select U3_DART
401 select MPIC_BROKEN_U3 407 select MPIC_BROKEN_U3
402 select GENERIC_TBSYNC 408 select GENERIC_TBSYNC
@@ -440,12 +446,6 @@ config U3_DART
440 depends on PPC_MULTIPLATFORM && PPC64 446 depends on PPC_MULTIPLATFORM && PPC64
441 default n 447 default n
442 448
443config MPIC
444 depends on PPC_PSERIES || PPC_PMAC || PPC_MAPLE || PPC_CHRP \
445 || MPC7448HPC2
446 bool
447 default y
448
449config PPC_RTAS 449config PPC_RTAS
450 bool 450 bool
451 default n 451 default n
@@ -813,6 +813,14 @@ config GENERIC_ISA_DMA
813 depends on PPC64 || POWER4 || 6xx && !CPM2 813 depends on PPC64 || POWER4 || 6xx && !CPM2
814 default y 814 default y
815 815
816config MPIC
817 bool
818 default n
819
820config MPIC_WEIRD
821 bool
822 default n
823
816config PPC_I8259 824config PPC_I8259
817 bool 825 bool
818 default n 826 default n
diff --git a/arch/powerpc/boot/dts/mpc7448hpc2.dts b/arch/powerpc/boot/dts/mpc7448hpc2.dts
new file mode 100644
index 00000000000..d7b985e6bd2
--- /dev/null
+++ b/arch/powerpc/boot/dts/mpc7448hpc2.dts
@@ -0,0 +1,190 @@
1/*
2 * MPC7448HPC2 (Taiga) board Device Tree Source
3 *
4 * Copyright 2006 Freescale Semiconductor Inc.
5 * 2006 Roy Zang <Roy Zang at freescale.com>.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License as published by the
9 * Free Software Foundation; either version 2 of the License, or (at your
10 * option) any later version.
11 */
12
13
14/ {
15 model = "mpc7448hpc2";
16 compatible = "mpc74xx";
17 #address-cells = <1>;
18 #size-cells = <1>;
19 linux,phandle = <100>;
20
21 cpus {
22 #cpus = <1>;
23 #address-cells = <1>;
24 #size-cells =<0>;
25 linux,phandle = <200>;
26
27 PowerPC,7448@0 {
28 device_type = "cpu";
29 reg = <0>;
30 d-cache-line-size = <20>; // 32 bytes
31 i-cache-line-size = <20>; // 32 bytes
32 d-cache-size = <8000>; // L1, 32K bytes
33 i-cache-size = <8000>; // L1, 32K bytes
34 timebase-frequency = <0>; // 33 MHz, from uboot
35 clock-frequency = <0>; // From U-Boot
36 bus-frequency = <0>; // From U-Boot
37 32-bit;
38 linux,phandle = <201>;
39 linux,boot-cpu;
40 };
41 };
42
43 memory {
44 device_type = "memory";
45 linux,phandle = <300>;
46 reg = <00000000 20000000 // DDR2 512M at 0
47 >;
48 };
49
50 tsi108@c0000000 {
51 #address-cells = <1>;
52 #size-cells = <1>;
53 #interrupt-cells = <2>;
54 device_type = "tsi-bridge";
55 ranges = <00000000 c0000000 00010000>;
56 reg = <c0000000 00010000>;
57 bus-frequency = <0>;
58
59 i2c@7000 {
60 interrupt-parent = <7400>;
61 interrupts = <E 0>;
62 reg = <7000 400>;
63 device_type = "i2c";
64 compatible = "tsi-i2c";
65 };
66
67 mdio@6000 {
68 device_type = "mdio";
69 compatible = "tsi-ethernet";
70
71 ethernet-phy@6000 {
72 linux,phandle = <6000>;
73 interrupt-parent = <7400>;
74 interrupts = <2 1>;
75 reg = <6000 50>;
76 phy-id = <8>;
77 device_type = "ethernet-phy";
78 };
79
80 ethernet-phy@6400 {
81 linux,phandle = <6400>;
82 interrupt-parent = <7400>;
83 interrupts = <2 1>;
84 reg = <6000 50>;
85 phy-id = <9>;
86 device_type = "ethernet-phy";
87 };
88
89 };
90
91 ethernet@6200 {
92 #size-cells = <0>;
93 device_type = "network";
94 model = "TSI-ETH";
95 compatible = "tsi-ethernet";
96 reg = <6000 200>;
97 address = [ 00 06 D2 00 00 01 ];
98 interrupts = <10 2>;
99 interrupt-parent = <7400>;
100 phy-handle = <6000>;
101 };
102
103 ethernet@6600 {
104 #address-cells = <1>;
105 #size-cells = <0>;
106 device_type = "network";
107 model = "TSI-ETH";
108 compatible = "tsi-ethernet";
109 reg = <6400 200>;
110 address = [ 00 06 D2 00 00 02 ];
111 interrupts = <11 2>;
112 interrupt-parent = <7400>;
113 phy-handle = <6400>;
114 };
115
116 serial@7808 {
117 device_type = "serial";
118 compatible = "ns16550";
119 reg = <7808 200>;
120 clock-frequency = <3f6b5a00>;
121 interrupts = <c 0>;
122 interrupt-parent = <7400>;
123 };
124
125 serial@7c08 {
126 device_type = "serial";
127 compatible = "ns16550";
128 reg = <7c08 200>;
129 clock-frequency = <3f6b5a00>;
130 interrupts = <d 0>;
131 interrupt-parent = <7400>;
132 };
133
134 pic@7400 {
135 linux,phandle = <7400>;
136 clock-frequency = <0>;
137 interrupt-controller;
138 #address-cells = <0>;
139 #interrupt-cells = <2>;
140 reg = <7400 400>;
141 built-in;
142 compatible = "chrp,open-pic";
143 device_type = "open-pic";
144 big-endian;
145 };
146 pci@1000 {
147 compatible = "tsi10x";
148 device_type = "pci";
149 linux,phandle = <1000>;
150 #interrupt-cells = <1>;
151 #size-cells = <2>;
152 #address-cells = <3>;
153 reg = <1000 1000>;
154 bus-range = <0 0>;
155 ranges = <02000000 0 e0000000 e0000000 0 1A000000
156 01000000 0 00000000 fa000000 0 00010000>;
157 clock-frequency = <7f28154>;
158 interrupt-parent = <7400>;
159 interrupts = <17 2>;
160 interrupt-map-mask = <f800 0 0 7>;
161 interrupt-map = <
162
163 /* IDSEL 0x11 */
164 0800 0 0 1 7400 24 0
165 0800 0 0 2 7400 25 0
166 0800 0 0 3 7400 26 0
167 0800 0 0 4 7400 27 0
168
169 /* IDSEL 0x12 */
170 1000 0 0 1 7400 25 0
171 1000 0 0 2 7400 26 0
172 1000 0 0 3 7400 27 0
173 1000 0 0 4 7400 24 0
174
175 /* IDSEL 0x13 */
176 1800 0 0 1 7400 26 0
177 1800 0 0 2 7400 27 0
178 1800 0 0 3 7400 24 0
179 1800 0 0 4 7400 25 0
180
181 /* IDSEL 0x14 */
182 2000 0 0 1 7400 27 0
183 2000 0 0 2 7400 24 0
184 2000 0 0 3 7400 25 0
185 2000 0 0 4 7400 26 0
186 >;
187 };
188 };
189
190};
diff --git a/arch/powerpc/boot/dts/mpc8349emds.dts b/arch/powerpc/boot/dts/mpc8349emds.dts
new file mode 100644
index 00000000000..12f5dbf3055
--- /dev/null
+++ b/arch/powerpc/boot/dts/mpc8349emds.dts
@@ -0,0 +1,328 @@
1/*
2 * MPC8349E MDS Device Tree Source
3 *
4 * Copyright 2005, 2006 Freescale Semiconductor Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; either version 2 of the License, or (at your
9 * option) any later version.
10 */
11
12/ {
13 model = "MPC8349EMDS";
14 compatible = "MPC834xMDS";
15 #address-cells = <1>;
16 #size-cells = <1>;
17
18 cpus {
19 #cpus = <1>;
20 #address-cells = <1>;
21 #size-cells = <0>;
22
23 PowerPC,8349@0 {
24 device_type = "cpu";
25 reg = <0>;
26 d-cache-line-size = <20>; // 32 bytes
27 i-cache-line-size = <20>; // 32 bytes
28 d-cache-size = <8000>; // L1, 32K
29 i-cache-size = <8000>; // L1, 32K
30 timebase-frequency = <0>; // from bootloader
31 bus-frequency = <0>; // from bootloader
32 clock-frequency = <0>; // from bootloader
33 32-bit;
34 };
35 };
36
37 memory {
38 device_type = "memory";
39 reg = <00000000 10000000>; // 256MB at 0
40 };
41
42 soc8349@e0000000 {
43 #address-cells = <1>;
44 #size-cells = <1>;
45 #interrupt-cells = <2>;
46 device_type = "soc";
47 ranges = <0 e0000000 00100000>;
48 reg = <e0000000 00000200>;
49 bus-frequency = <0>;
50
51 wdt@200 {
52 device_type = "watchdog";
53 compatible = "mpc83xx_wdt";
54 reg = <200 100>;
55 };
56
57 i2c@3000 {
58 device_type = "i2c";
59 compatible = "fsl-i2c";
60 reg = <3000 100>;
61 interrupts = <e 8>;
62 interrupt-parent = <700>;
63 dfsrr;
64 };
65
66 i2c@3100 {
67 device_type = "i2c";
68 compatible = "fsl-i2c";
69 reg = <3100 100>;
70 interrupts = <f 8>;
71 interrupt-parent = <700>;
72 dfsrr;
73 };
74
75 spi@7000 {
76 device_type = "spi";
77 compatible = "mpc83xx_spi";
78 reg = <7000 1000>;
79 interrupts = <10 8>;
80 interrupt-parent = <700>;
81 mode = <0>;
82 };
83
84 /* phy type (ULPI or SERIAL) are the only types supported for MPH */
85 /* port = 0 or 1 */
86 usb@22000 {
87 device_type = "usb";
88 compatible = "fsl-usb2-mph";
89 reg = <22000 1000>;
90 #address-cells = <1>;
91 #size-cells = <0>;
92 interrupt-parent = <700>;
93 interrupts = <27 2>;
94 phy_type = "ulpi";
95 port1;
96 };
97 /* phy type (ULPI, UTMI, UTMI_WIDE, SERIAL) */
98 usb@23000 {
99 device_type = "usb";
100 compatible = "fsl-usb2-dr";
101 reg = <23000 1000>;
102 #address-cells = <1>;
103 #size-cells = <0>;
104 interrupt-parent = <700>;
105 interrupts = <26 2>;
106 phy_type = "ulpi";
107 };
108
109 mdio@24520 {
110 device_type = "mdio";
111 compatible = "gianfar";
112 reg = <24520 20>;
113 #address-cells = <1>;
114 #size-cells = <0>;
115 linux,phandle = <24520>;
116 ethernet-phy@0 {
117 linux,phandle = <2452000>;
118 interrupt-parent = <700>;
119 interrupts = <11 2>;
120 reg = <0>;
121 device_type = "ethernet-phy";
122 };
123 ethernet-phy@1 {
124 linux,phandle = <2452001>;
125 interrupt-parent = <700>;
126 interrupts = <12 2>;
127 reg = <1>;
128 device_type = "ethernet-phy";
129 };
130 };
131
132 ethernet@24000 {
133 device_type = "network";
134 model = "TSEC";
135 compatible = "gianfar";
136 reg = <24000 1000>;
137 address = [ 00 00 00 00 00 00 ];
138 local-mac-address = [ 00 00 00 00 00 00 ];
139 interrupts = <20 8 21 8 22 8>;
140 interrupt-parent = <700>;
141 phy-handle = <2452000>;
142 };
143
144 ethernet@25000 {
145 #address-cells = <1>;
146 #size-cells = <0>;
147 device_type = "network";
148 model = "TSEC";
149 compatible = "gianfar";
150 reg = <25000 1000>;
151 address = [ 00 00 00 00 00 00 ];
152 local-mac-address = [ 00 00 00 00 00 00 ];
153 interrupts = <23 8 24 8 25 8>;
154 interrupt-parent = <700>;
155 phy-handle = <2452001>;
156 };
157
158 serial@4500 {
159 device_type = "serial";
160 compatible = "ns16550";
161 reg = <4500 100>;
162 clock-frequency = <0>;
163 interrupts = <9 8>;
164 interrupt-parent = <700>;
165 };
166
167 serial@4600 {
168 device_type = "serial";
169 compatible = "ns16550";
170 reg = <4600 100>;
171 clock-frequency = <0>;
172 interrupts = <a 8>;
173 interrupt-parent = <700>;
174 };
175
176 pci@8500 {
177 interrupt-map-mask = <f800 0 0 7>;
178 interrupt-map = <
179
180 /* IDSEL 0x11 */
181 8800 0 0 1 700 14 8
182 8800 0 0 2 700 15 8
183 8800 0 0 3 700 16 8
184 8800 0 0 4 700 17 8
185
186 /* IDSEL 0x12 */
187 9000 0 0 1 700 16 8
188 9000 0 0 2 700 17 8
189 9000 0 0 3 700 14 8
190 9000 0 0 4 700 15 8
191
192 /* IDSEL 0x13 */
193 9800 0 0 1 700 17 8
194 9800 0 0 2 700 14 8
195 9800 0 0 3 700 15 8
196 9800 0 0 4 700 16 8
197
198 /* IDSEL 0x15 */
199 a800 0 0 1 700 14 8
200 a800 0 0 2 700 15 8
201 a800 0 0 3 700 16 8
202 a800 0 0 4 700 17 8
203
204 /* IDSEL 0x16 */
205 b000 0 0 1 700 17 8
206 b000 0 0 2 700 14 8
207 b000 0 0 3 700 15 8
208 b000 0 0 4 700 16 8
209
210 /* IDSEL 0x17 */
211 b800 0 0 1 700 16 8
212 b800 0 0 2 700 17 8
213 b800 0 0 3 700 14 8
214 b800 0 0 4 700 15 8
215
216 /* IDSEL 0x18 */
217 b000 0 0 1 700 15 8
218 b000 0 0 2 700 16 8
219 b000 0 0 3 700 17 8
220 b000 0 0 4 700 14 8>;
221 interrupt-parent = <700>;
222 interrupts = <42 8>;
223 bus-range = <0 0>;
224 ranges = <02000000 0 a0000000 a0000000 0 10000000
225 42000000 0 80000000 80000000 0 10000000
226 01000000 0 00000000 e2000000 0 00100000>;
227 clock-frequency = <3f940aa>;
228 #interrupt-cells = <1>;
229 #size-cells = <2>;
230 #address-cells = <3>;
231 reg = <8500 100>;
232 compatible = "83xx";
233 device_type = "pci";
234 };
235
236 pci@8600 {
237 interrupt-map-mask = <f800 0 0 7>;
238 interrupt-map = <
239
240 /* IDSEL 0x11 */
241 8800 0 0 1 700 14 8
242 8800 0 0 2 700 15 8
243 8800 0 0 3 700 16 8
244 8800 0 0 4 700 17 8
245
246 /* IDSEL 0x12 */
247 9000 0 0 1 700 16 8
248 9000 0 0 2 700 17 8
249 9000 0 0 3 700 14 8
250 9000 0 0 4 700 15 8
251
252 /* IDSEL 0x13 */
253 9800 0 0 1 700 17 8
254 9800 0 0 2 700 14 8
255 9800 0 0 3 700 15 8
256 9800 0 0 4 700 16 8
257
258 /* IDSEL 0x15 */
259 a800 0 0 1 700 14 8
260 a800 0 0 2 700 15 8
261 a800 0 0 3 700 16 8
262 a800 0 0 4 700 17 8
263
264 /* IDSEL 0x16 */
265 b000 0 0 1 700 17 8
266 b000 0 0 2 700 14 8
267 b000 0 0 3 700 15 8
268 b000 0 0 4 700 16 8
269
270 /* IDSEL 0x17 */
271 b800 0 0 1 700 16 8
272 b800 0 0 2 700 17 8
273 b800 0 0 3 700 14 8
274 b800 0 0 4 700 15 8
275
276 /* IDSEL 0x18 */
277 b000 0 0 1 700 15 8
278 b000 0 0 2 700 16 8
279 b000 0 0 3 700 17 8
280 b000 0 0 4 700 14 8>;
281 interrupt-parent = <700>;
282 interrupts = <42 8>;
283 bus-range = <0 0>;
284 ranges = <02000000 0 b0000000 b0000000 0 10000000
285 42000000 0 90000000 90000000 0 10000000
286 01000000 0 00000000 e2100000 0 00100000>;
287 clock-frequency = <3f940aa>;
288 #interrupt-cells = <1>;
289 #size-cells = <2>;
290 #address-cells = <3>;
291 reg = <8600 100>;
292 compatible = "83xx";
293 device_type = "pci";
294 };
295
296 /* May need to remove if on a part without crypto engine */
297 crypto@30000 {
298 device_type = "crypto";
299 model = "SEC2";
300 compatible = "talitos";
301 reg = <30000 10000>;
302 interrupts = <b 8>;
303 interrupt-parent = <700>;
304 num-channels = <4>;
305 channel-fifo-len = <18>;
306 exec-units-mask = <0000007e>;
307 /* desc mask is for rev2.0,
308 * we need runtime fixup for >2.0 */
309 descriptor-types-mask = <01010ebf>;
310 };
311
312 /* IPIC
313 * interrupts cell = <intr #, sense>
314 * sense values match linux IORESOURCE_IRQ_* defines:
315 * sense == 8: Level, low assertion
316 * sense == 2: Edge, high-to-low change
317 */
318 pic@700 {
319 linux,phandle = <700>;
320 interrupt-controller;
321 #address-cells = <0>;
322 #interrupt-cells = <2>;
323 reg = <700 100>;
324 built-in;
325 device_type = "ipic";
326 };
327 };
328};
diff --git a/arch/powerpc/configs/mpc834x_sys_defconfig b/arch/powerpc/configs/mpc834x_mds_defconfig
index 5078b0441d6..5078b0441d6 100644
--- a/arch/powerpc/configs/mpc834x_sys_defconfig
+++ b/arch/powerpc/configs/mpc834x_mds_defconfig
diff --git a/arch/powerpc/kernel/fpu.S b/arch/powerpc/kernel/fpu.S
index 7e2c9fe44ac..821e152e093 100644
--- a/arch/powerpc/kernel/fpu.S
+++ b/arch/powerpc/kernel/fpu.S
@@ -2,6 +2,11 @@
2 * FPU support code, moved here from head.S so that it can be used 2 * FPU support code, moved here from head.S so that it can be used
3 * by chips which use other head-whatever.S files. 3 * by chips which use other head-whatever.S files.
4 * 4 *
5 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
6 * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
7 * Copyright (C) 1996 Paul Mackerras.
8 * Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
9 *
5 * This program is free software; you can redistribute it and/or 10 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License 11 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version 12 * as published by the Free Software Foundation; either version
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index fd4ddb858db..b4432332341 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -323,7 +323,8 @@ EXPORT_SYMBOL(do_softirq);
323 323
324static LIST_HEAD(irq_hosts); 324static LIST_HEAD(irq_hosts);
325static spinlock_t irq_big_lock = SPIN_LOCK_UNLOCKED; 325static spinlock_t irq_big_lock = SPIN_LOCK_UNLOCKED;
326 326static DEFINE_PER_CPU(unsigned int, irq_radix_reader);
327static unsigned int irq_radix_writer;
327struct irq_map_entry irq_map[NR_IRQS]; 328struct irq_map_entry irq_map[NR_IRQS];
328static unsigned int irq_virq_count = NR_IRQS; 329static unsigned int irq_virq_count = NR_IRQS;
329static struct irq_host *irq_default_host; 330static struct irq_host *irq_default_host;
@@ -456,6 +457,58 @@ void irq_set_virq_count(unsigned int count)
456 irq_virq_count = count; 457 irq_virq_count = count;
457} 458}
458 459
460/* radix tree not lockless safe! we use a brlock-type mechanism
461 * for now, until we can use a lockless radix tree
462 */
463static void irq_radix_wrlock(unsigned long *flags)
464{
465 unsigned int cpu, ok;
466
467 spin_lock_irqsave(&irq_big_lock, *flags);
468 irq_radix_writer = 1;
469 smp_mb();
470 do {
471 barrier();
472 ok = 1;
473 for_each_possible_cpu(cpu) {
474 if (per_cpu(irq_radix_reader, cpu)) {
475 ok = 0;
476 break;
477 }
478 }
479 if (!ok)
480 cpu_relax();
481 } while(!ok);
482}
483
484static void irq_radix_wrunlock(unsigned long flags)
485{
486 smp_wmb();
487 irq_radix_writer = 0;
488 spin_unlock_irqrestore(&irq_big_lock, flags);
489}
490
491static void irq_radix_rdlock(unsigned long *flags)
492{
493 local_irq_save(*flags);
494 __get_cpu_var(irq_radix_reader) = 1;
495 smp_mb();
496 if (likely(irq_radix_writer == 0))
497 return;
498 __get_cpu_var(irq_radix_reader) = 0;
499 smp_wmb();
500 spin_lock(&irq_big_lock);
501 __get_cpu_var(irq_radix_reader) = 1;
502 spin_unlock(&irq_big_lock);
503}
504
505static void irq_radix_rdunlock(unsigned long flags)
506{
507 __get_cpu_var(irq_radix_reader) = 0;
508 local_irq_restore(flags);
509}
510
511
459unsigned int irq_create_mapping(struct irq_host *host, 512unsigned int irq_create_mapping(struct irq_host *host,
460 irq_hw_number_t hwirq) 513 irq_hw_number_t hwirq)
461{ 514{
@@ -605,13 +658,9 @@ void irq_dispose_mapping(unsigned int virq)
605 /* Check if radix tree allocated yet */ 658 /* Check if radix tree allocated yet */
606 if (host->revmap_data.tree.gfp_mask == 0) 659 if (host->revmap_data.tree.gfp_mask == 0)
607 break; 660 break;
608 /* XXX radix tree not safe ! remove lock whem it becomes safe 661 irq_radix_wrlock(&flags);
609 * and use some RCU sync to make sure everything is ok before we
610 * can re-use that map entry
611 */
612 spin_lock_irqsave(&irq_big_lock, flags);
613 radix_tree_delete(&host->revmap_data.tree, hwirq); 662 radix_tree_delete(&host->revmap_data.tree, hwirq);
614 spin_unlock_irqrestore(&irq_big_lock, flags); 663 irq_radix_wrunlock(flags);
615 break; 664 break;
616 } 665 }
617 666
@@ -678,25 +727,24 @@ unsigned int irq_radix_revmap(struct irq_host *host,
678 if (tree->gfp_mask == 0) 727 if (tree->gfp_mask == 0)
679 return irq_find_mapping(host, hwirq); 728 return irq_find_mapping(host, hwirq);
680 729
681 /* XXX Current radix trees are NOT SMP safe !!! Remove that lock
682 * when that is fixed (when Nick's patch gets in
683 */
684 spin_lock_irqsave(&irq_big_lock, flags);
685
686 /* Now try to resolve */ 730 /* Now try to resolve */
731 irq_radix_rdlock(&flags);
687 ptr = radix_tree_lookup(tree, hwirq); 732 ptr = radix_tree_lookup(tree, hwirq);
733 irq_radix_rdunlock(flags);
734
688 /* Found it, return */ 735 /* Found it, return */
689 if (ptr) { 736 if (ptr) {
690 virq = ptr - irq_map; 737 virq = ptr - irq_map;
691 goto bail; 738 return virq;
692 } 739 }
693 740
694 /* If not there, try to insert it */ 741 /* If not there, try to insert it */
695 virq = irq_find_mapping(host, hwirq); 742 virq = irq_find_mapping(host, hwirq);
696 if (virq != NO_IRQ) 743 if (virq != NO_IRQ) {
744 irq_radix_wrlock(&flags);
697 radix_tree_insert(tree, hwirq, &irq_map[virq]); 745 radix_tree_insert(tree, hwirq, &irq_map[virq]);
698 bail: 746 irq_radix_wrunlock(flags);
699 spin_unlock_irqrestore(&irq_big_lock, flags); 747 }
700 return virq; 748 return virq;
701} 749}
702 750
@@ -807,12 +855,12 @@ static int irq_late_init(void)
807 struct irq_host *h; 855 struct irq_host *h;
808 unsigned long flags; 856 unsigned long flags;
809 857
810 spin_lock_irqsave(&irq_big_lock, flags); 858 irq_radix_wrlock(&flags);
811 list_for_each_entry(h, &irq_hosts, link) { 859 list_for_each_entry(h, &irq_hosts, link) {
812 if (h->revmap_type == IRQ_HOST_MAP_TREE) 860 if (h->revmap_type == IRQ_HOST_MAP_TREE)
813 INIT_RADIX_TREE(&h->revmap_data.tree, GFP_ATOMIC); 861 INIT_RADIX_TREE(&h->revmap_data.tree, GFP_ATOMIC);
814 } 862 }
815 spin_unlock_irqrestore(&irq_big_lock, flags); 863 irq_radix_wrunlock(flags);
816 864
817 return 0; 865 return 0;
818} 866}
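
The irq.c hunks above replace the plain irq_big_lock critical sections around the reverse-map radix tree with a brlock-style scheme: each reader marks a per-CPU flag and only falls back to the big lock while a writer is active, while a writer takes the big lock and spins until every reader flag has cleared. A minimal userspace C sketch of the same idea follows (this is not the kernel code; NCPUS, reader[], writer and big_lock are names invented for the sketch, and the kernel variant additionally disables local interrupts on the reader side):

    #include <stdatomic.h>
    #include <pthread.h>

    #define NCPUS 4
    static atomic_int reader[NCPUS];          /* one flag per CPU/thread */
    static atomic_int writer;                 /* set while a writer is active */
    static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;

    static void rdlock(int cpu)
    {
            atomic_store(&reader[cpu], 1);
            if (atomic_load(&writer) == 0)
                    return;                   /* fast path: no writer active */
            atomic_store(&reader[cpu], 0);    /* back off and wait for the writer */
            pthread_mutex_lock(&big_lock);
            atomic_store(&reader[cpu], 1);    /* re-arm while holding the lock */
            pthread_mutex_unlock(&big_lock);
    }

    static void rdunlock(int cpu)
    {
            atomic_store(&reader[cpu], 0);
    }

    static void wrlock(void)
    {
            pthread_mutex_lock(&big_lock);
            atomic_store(&writer, 1);
            for (int cpu = 0; cpu < NCPUS; cpu++)
                    while (atomic_load(&reader[cpu]))
                            ;                 /* wait for in-flight readers */
    }

    static void wrunlock(void)
    {
            atomic_store(&writer, 0);
            pthread_mutex_unlock(&big_lock);
    }
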
diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c
index d51be7c7a2e..c1b1e14775e 100644
--- a/arch/powerpc/kernel/pci_64.c
+++ b/arch/powerpc/kernel/pci_64.c
@@ -1254,6 +1254,9 @@ int pci_read_irq_line(struct pci_dev *pci_dev)
1254 1254
1255 DBG("Try to map irq for %s...\n", pci_name(pci_dev)); 1255 DBG("Try to map irq for %s...\n", pci_name(pci_dev));
1256 1256
1257#ifdef DEBUG
1258 memset(&oirq, 0xff, sizeof(oirq));
1259#endif
1257 /* Try to get a mapping from the device-tree */ 1260 /* Try to get a mapping from the device-tree */
1258 if (of_irq_map_pci(pci_dev, &oirq)) { 1261 if (of_irq_map_pci(pci_dev, &oirq)) {
1259 u8 line, pin; 1262 u8 line, pin;
@@ -1279,8 +1282,9 @@ int pci_read_irq_line(struct pci_dev *pci_dev)
1279 if (virq != NO_IRQ) 1282 if (virq != NO_IRQ)
1280 set_irq_type(virq, IRQ_TYPE_LEVEL_LOW); 1283 set_irq_type(virq, IRQ_TYPE_LEVEL_LOW);
1281 } else { 1284 } else {
1282 DBG(" -> got one, spec %d cells (0x%08x...) on %s\n", 1285 DBG(" -> got one, spec %d cells (0x%08x 0x%08x...) on %s\n",
1283 oirq.size, oirq.specifier[0], oirq.controller->full_name); 1286 oirq.size, oirq.specifier[0], oirq.specifier[1],
1287 oirq.controller->full_name);
1284 1288
1285 virq = irq_create_of_mapping(oirq.controller, oirq.specifier, 1289 virq = irq_create_of_mapping(oirq.controller, oirq.specifier,
1286 oirq.size); 1290 oirq.size);
@@ -1289,6 +1293,9 @@ int pci_read_irq_line(struct pci_dev *pci_dev)
1289 DBG(" -> failed to map !\n"); 1293 DBG(" -> failed to map !\n");
1290 return -1; 1294 return -1;
1291 } 1295 }
1296
1297 DBG(" -> mapped to linux irq %d\n", virq);
1298
1292 pci_dev->irq = virq; 1299 pci_dev->irq = virq;
1293 pci_write_config_byte(pci_dev, PCI_INTERRUPT_LINE, virq); 1300 pci_write_config_byte(pci_dev, PCI_INTERRUPT_LINE, virq);
1294 1301
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index 90972ef6c47..b91761639d9 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -646,13 +646,13 @@ static unsigned char ibm_architecture_vec[] = {
646 5 - 1, /* 5 option vectors */ 646 5 - 1, /* 5 option vectors */
647 647
648 /* option vector 1: processor architectures supported */ 648 /* option vector 1: processor architectures supported */
649 3 - 1, /* length */ 649 3 - 2, /* length */
650 0, /* don't ignore, don't halt */ 650 0, /* don't ignore, don't halt */
651 OV1_PPC_2_00 | OV1_PPC_2_01 | OV1_PPC_2_02 | OV1_PPC_2_03 | 651 OV1_PPC_2_00 | OV1_PPC_2_01 | OV1_PPC_2_02 | OV1_PPC_2_03 |
652 OV1_PPC_2_04 | OV1_PPC_2_05, 652 OV1_PPC_2_04 | OV1_PPC_2_05,
653 653
654 /* option vector 2: Open Firmware options supported */ 654 /* option vector 2: Open Firmware options supported */
655 34 - 1, /* length */ 655 34 - 2, /* length */
656 OV2_REAL_MODE, 656 OV2_REAL_MODE,
657 0, 0, 657 0, 0,
658 W(0xffffffff), /* real_base */ 658 W(0xffffffff), /* real_base */
@@ -666,16 +666,16 @@ static unsigned char ibm_architecture_vec[] = {
666 48, /* max log_2(hash table size) */ 666 48, /* max log_2(hash table size) */
667 667
668 /* option vector 3: processor options supported */ 668 /* option vector 3: processor options supported */
669 3 - 1, /* length */ 669 3 - 2, /* length */
670 0, /* don't ignore, don't halt */ 670 0, /* don't ignore, don't halt */
671 OV3_FP | OV3_VMX, 671 OV3_FP | OV3_VMX,
672 672
673 /* option vector 4: IBM PAPR implementation */ 673 /* option vector 4: IBM PAPR implementation */
674 2 - 1, /* length */ 674 2 - 2, /* length */
675 0, /* don't halt */ 675 0, /* don't halt */
676 676
677 /* option vector 5: PAPR/OF options */ 677 /* option vector 5: PAPR/OF options */
678 3 - 1, /* length */ 678 3 - 2, /* length */
679 0, /* don't ignore, don't halt */ 679 0, /* don't ignore, don't halt */
680 OV5_LPAR | OV5_SPLPAR | OV5_LARGE_PAGES, 680 OV5_LPAR | OV5_SPLPAR | OV5_LARGE_PAGES,
681}; 681};
diff --git a/arch/powerpc/kernel/prom_parse.c b/arch/powerpc/kernel/prom_parse.c
index 603203276ef..603dff3ad62 100644
--- a/arch/powerpc/kernel/prom_parse.c
+++ b/arch/powerpc/kernel/prom_parse.c
@@ -642,7 +642,7 @@ void of_irq_map_init(unsigned int flags)
642 642
643} 643}
644 644
645int of_irq_map_raw(struct device_node *parent, const u32 *intspec, 645int of_irq_map_raw(struct device_node *parent, const u32 *intspec, u32 ointsize,
646 const u32 *addr, struct of_irq *out_irq) 646 const u32 *addr, struct of_irq *out_irq)
647{ 647{
648 struct device_node *ipar, *tnode, *old = NULL, *newpar = NULL; 648 struct device_node *ipar, *tnode, *old = NULL, *newpar = NULL;
@@ -650,6 +650,9 @@ int of_irq_map_raw(struct device_node *parent, const u32 *intspec,
650 u32 intsize = 1, addrsize, newintsize = 0, newaddrsize = 0; 650 u32 intsize = 1, addrsize, newintsize = 0, newaddrsize = 0;
651 int imaplen, match, i; 651 int imaplen, match, i;
652 652
653 DBG("of_irq_map_raw: par=%s,intspec=[0x%08x 0x%08x...],ointsize=%d\n",
654 parent->full_name, intspec[0], intspec[1], ointsize);
655
653 ipar = of_node_get(parent); 656 ipar = of_node_get(parent);
654 657
655 /* First get the #interrupt-cells property of the current cursor 658 /* First get the #interrupt-cells property of the current cursor
@@ -673,6 +676,9 @@ int of_irq_map_raw(struct device_node *parent, const u32 *intspec,
673 676
674 DBG("of_irq_map_raw: ipar=%s, size=%d\n", ipar->full_name, intsize); 677 DBG("of_irq_map_raw: ipar=%s, size=%d\n", ipar->full_name, intsize);
675 678
679 if (ointsize != intsize)
680 return -EINVAL;
681
676 /* Look for this #address-cells. We have to implement the old linux 682 /* Look for this #address-cells. We have to implement the old linux
677 * trick of looking for the parent here as some device-trees rely on it 683 * trick of looking for the parent here as some device-trees rely on it
678 */ 684 */
@@ -879,12 +885,15 @@ int of_irq_map_one(struct device_node *device, int index, struct of_irq *out_irq
879 } 885 }
880 intsize = *tmp; 886 intsize = *tmp;
881 887
888 DBG(" intsize=%d intlen=%d\n", intsize, intlen);
889
882 /* Check index */ 890 /* Check index */
883 if ((index + 1) * intsize > intlen) 891 if ((index + 1) * intsize > intlen)
884 return -EINVAL; 892 return -EINVAL;
885 893
886 /* Get new specifier and map it */ 894 /* Get new specifier and map it */
887 res = of_irq_map_raw(p, intspec + index * intsize, addr, out_irq); 895 res = of_irq_map_raw(p, intspec + index * intsize, intsize,
896 addr, out_irq);
888 of_node_put(p); 897 of_node_put(p);
889 return res; 898 return res;
890} 899}
@@ -969,7 +978,7 @@ int of_irq_map_pci(struct pci_dev *pdev, struct of_irq *out_irq)
969 laddr[0] = (pdev->bus->number << 16) 978 laddr[0] = (pdev->bus->number << 16)
970 | (pdev->devfn << 8); 979 | (pdev->devfn << 8);
971 laddr[1] = laddr[2] = 0; 980 laddr[1] = laddr[2] = 0;
972 return of_irq_map_raw(ppnode, &lspec, laddr, out_irq); 981 return of_irq_map_raw(ppnode, &lspec, 1, laddr, out_irq);
973} 982}
974EXPORT_SYMBOL_GPL(of_irq_map_pci); 983EXPORT_SYMBOL_GPL(of_irq_map_pci);
975#endif /* CONFIG_PCI */ 984#endif /* CONFIG_PCI */
diff --git a/arch/powerpc/kernel/smp-tbsync.c b/arch/powerpc/kernel/smp-tbsync.c
index f19e2e0e61e..de59c6c31a5 100644
--- a/arch/powerpc/kernel/smp-tbsync.c
+++ b/arch/powerpc/kernel/smp-tbsync.c
@@ -45,8 +45,9 @@ void __devinit smp_generic_take_timebase(void)
45{ 45{
46 int cmd; 46 int cmd;
47 u64 tb; 47 u64 tb;
48 unsigned long flags;
48 49
49 local_irq_disable(); 50 local_irq_save(flags);
50 while (!running) 51 while (!running)
51 barrier(); 52 barrier();
52 rmb(); 53 rmb();
@@ -70,7 +71,7 @@ void __devinit smp_generic_take_timebase(void)
70 set_tb(tb >> 32, tb & 0xfffffffful); 71 set_tb(tb >> 32, tb & 0xfffffffful);
71 enter_contest(tbsync->mark, -1); 72 enter_contest(tbsync->mark, -1);
72 } 73 }
73 local_irq_enable(); 74 local_irq_restore(flags);
74} 75}
75 76
76static int __devinit start_contest(int cmd, long offset, int num) 77static int __devinit start_contest(int cmd, long offset, int num)
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 272cb826901..b9a2061cfdb 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -125,15 +125,8 @@ static long timezone_offset;
125unsigned long ppc_proc_freq; 125unsigned long ppc_proc_freq;
126unsigned long ppc_tb_freq; 126unsigned long ppc_tb_freq;
127 127
128u64 tb_last_jiffy __cacheline_aligned_in_smp; 128static u64 tb_last_jiffy __cacheline_aligned_in_smp;
129unsigned long tb_last_stamp; 129static DEFINE_PER_CPU(u64, last_jiffy);
130
131/*
132 * Note that on ppc32 this only stores the bottom 32 bits of
133 * the timebase value, but that's enough to tell when a jiffy
134 * has passed.
135 */
136DEFINE_PER_CPU(unsigned long, last_jiffy);
137 130
138#ifdef CONFIG_VIRT_CPU_ACCOUNTING 131#ifdef CONFIG_VIRT_CPU_ACCOUNTING
139/* 132/*
@@ -458,7 +451,7 @@ void do_gettimeofday(struct timeval *tv)
458 do { 451 do {
459 seq = read_seqbegin_irqsave(&xtime_lock, flags); 452 seq = read_seqbegin_irqsave(&xtime_lock, flags);
460 sec = xtime.tv_sec; 453 sec = xtime.tv_sec;
461 nsec = xtime.tv_nsec + tb_ticks_since(tb_last_stamp); 454 nsec = xtime.tv_nsec + tb_ticks_since(tb_last_jiffy);
462 } while (read_seqretry_irqrestore(&xtime_lock, seq, flags)); 455 } while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
463 usec = nsec / 1000; 456 usec = nsec / 1000;
464 while (usec >= 1000000) { 457 while (usec >= 1000000) {
@@ -700,7 +693,6 @@ void timer_interrupt(struct pt_regs * regs)
700 tb_next_jiffy = tb_last_jiffy + tb_ticks_per_jiffy; 693 tb_next_jiffy = tb_last_jiffy + tb_ticks_per_jiffy;
701 if (per_cpu(last_jiffy, cpu) >= tb_next_jiffy) { 694 if (per_cpu(last_jiffy, cpu) >= tb_next_jiffy) {
702 tb_last_jiffy = tb_next_jiffy; 695 tb_last_jiffy = tb_next_jiffy;
703 tb_last_stamp = per_cpu(last_jiffy, cpu);
704 do_timer(regs); 696 do_timer(regs);
705 timer_recalc_offset(tb_last_jiffy); 697 timer_recalc_offset(tb_last_jiffy);
706 timer_check_rtc(); 698 timer_check_rtc();
@@ -749,7 +741,7 @@ void __init smp_space_timers(unsigned int max_cpus)
749 int i; 741 int i;
750 unsigned long half = tb_ticks_per_jiffy / 2; 742 unsigned long half = tb_ticks_per_jiffy / 2;
751 unsigned long offset = tb_ticks_per_jiffy / max_cpus; 743 unsigned long offset = tb_ticks_per_jiffy / max_cpus;
752 unsigned long previous_tb = per_cpu(last_jiffy, boot_cpuid); 744 u64 previous_tb = per_cpu(last_jiffy, boot_cpuid);
753 745
754 /* make sure tb > per_cpu(last_jiffy, cpu) for all cpus always */ 746 /* make sure tb > per_cpu(last_jiffy, cpu) for all cpus always */
755 previous_tb -= tb_ticks_per_jiffy; 747 previous_tb -= tb_ticks_per_jiffy;
@@ -830,7 +822,7 @@ int do_settimeofday(struct timespec *tv)
830 * and therefore the (jiffies - wall_jiffies) computation 822 * and therefore the (jiffies - wall_jiffies) computation
831 * has been removed. 823 * has been removed.
832 */ 824 */
833 tb_delta = tb_ticks_since(tb_last_stamp); 825 tb_delta = tb_ticks_since(tb_last_jiffy);
834 tb_delta = mulhdu(tb_delta, do_gtod.varp->tb_to_xs); /* in xsec */ 826 tb_delta = mulhdu(tb_delta, do_gtod.varp->tb_to_xs); /* in xsec */
835 new_nsec -= SCALE_XSEC(tb_delta, 1000000000); 827 new_nsec -= SCALE_XSEC(tb_delta, 1000000000);
836 828
@@ -950,8 +942,7 @@ void __init time_init(void)
950 if (__USE_RTC()) { 942 if (__USE_RTC()) {
951 /* 601 processor: dec counts down by 128 every 128ns */ 943 /* 601 processor: dec counts down by 128 every 128ns */
952 ppc_tb_freq = 1000000000; 944 ppc_tb_freq = 1000000000;
953 tb_last_stamp = get_rtcl(); 945 tb_last_jiffy = get_rtcl();
954 tb_last_jiffy = tb_last_stamp;
955 } else { 946 } else {
956 /* Normal PowerPC with timebase register */ 947 /* Normal PowerPC with timebase register */
957 ppc_md.calibrate_decr(); 948 ppc_md.calibrate_decr();
@@ -959,7 +950,7 @@ void __init time_init(void)
959 ppc_tb_freq / 1000000, ppc_tb_freq % 1000000); 950 ppc_tb_freq / 1000000, ppc_tb_freq % 1000000);
960 printk(KERN_DEBUG "time_init: processor frequency = %lu.%.6lu MHz\n", 951 printk(KERN_DEBUG "time_init: processor frequency = %lu.%.6lu MHz\n",
961 ppc_proc_freq / 1000000, ppc_proc_freq % 1000000); 952 ppc_proc_freq / 1000000, ppc_proc_freq % 1000000);
962 tb_last_stamp = tb_last_jiffy = get_tb(); 953 tb_last_jiffy = get_tb();
963 } 954 }
964 955
965 tb_ticks_per_jiffy = ppc_tb_freq / HZ; 956 tb_ticks_per_jiffy = ppc_tb_freq / HZ;
@@ -1036,7 +1027,7 @@ void __init time_init(void)
1036 do_gtod.varp = &do_gtod.vars[0]; 1027 do_gtod.varp = &do_gtod.vars[0];
1037 do_gtod.var_idx = 0; 1028 do_gtod.var_idx = 0;
1038 do_gtod.varp->tb_orig_stamp = tb_last_jiffy; 1029 do_gtod.varp->tb_orig_stamp = tb_last_jiffy;
1039 __get_cpu_var(last_jiffy) = tb_last_stamp; 1030 __get_cpu_var(last_jiffy) = tb_last_jiffy;
1040 do_gtod.varp->stamp_xsec = (u64) xtime.tv_sec * XSEC_PER_SEC; 1031 do_gtod.varp->stamp_xsec = (u64) xtime.tv_sec * XSEC_PER_SEC;
1041 do_gtod.tb_ticks_per_sec = tb_ticks_per_sec; 1032 do_gtod.tb_ticks_per_sec = tb_ticks_per_sec;
1042 do_gtod.varp->tb_to_xs = tb_to_xs; 1033 do_gtod.varp->tb_to_xs = tb_to_xs;
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 4d0b4e74d57..9b352bd0a46 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -148,7 +148,7 @@ int die(const char *str, struct pt_regs *regs, long err)
148 panic("Fatal exception in interrupt"); 148 panic("Fatal exception in interrupt");
149 149
150 if (panic_on_oops) 150 if (panic_on_oops)
151 panic("Fatal exception: panic_on_oops"); 151 panic("Fatal exception");
152 152
153 do_exit(err); 153 do_exit(err);
154 154
diff --git a/arch/powerpc/lib/memcpy_64.S b/arch/powerpc/lib/memcpy_64.S
index fd66acfd3e3..7173ba98f42 100644
--- a/arch/powerpc/lib/memcpy_64.S
+++ b/arch/powerpc/lib/memcpy_64.S
@@ -11,6 +11,7 @@
11 11
12 .align 7 12 .align 7
13_GLOBAL(memcpy) 13_GLOBAL(memcpy)
14 std r3,48(r1) /* save destination pointer for return value */
14 mtcrf 0x01,r5 15 mtcrf 0x01,r5
15 cmpldi cr1,r5,16 16 cmpldi cr1,r5,16
16 neg r6,r3 # LS 3 bits = # bytes to 8-byte dest bdry 17 neg r6,r3 # LS 3 bits = # bytes to 8-byte dest bdry
@@ -38,7 +39,7 @@ _GLOBAL(memcpy)
38 stdu r9,16(r3) 39 stdu r9,16(r3)
39 bdnz 1b 40 bdnz 1b
403: std r8,8(r3) 413: std r8,8(r3)
41 beqlr 42 beq 3f
42 addi r3,r3,16 43 addi r3,r3,16
43 ld r9,8(r4) 44 ld r9,8(r4)
44.Ldo_tail: 45.Ldo_tail:
@@ -53,7 +54,8 @@ _GLOBAL(memcpy)
532: bf cr7*4+3,3f 542: bf cr7*4+3,3f
54 rotldi r9,r9,8 55 rotldi r9,r9,8
55 stb r9,0(r3) 56 stb r9,0(r3)
563: blr 573: ld r3,48(r1) /* return dest pointer */
58 blr
57 59
58.Lsrc_unaligned: 60.Lsrc_unaligned:
59 srdi r6,r5,3 61 srdi r6,r5,3
@@ -115,7 +117,7 @@ _GLOBAL(memcpy)
1155: srd r12,r9,r11 1175: srd r12,r9,r11
116 or r12,r8,r12 118 or r12,r8,r12
117 std r12,24(r3) 119 std r12,24(r3)
118 beqlr 120 beq 4f
119 cmpwi cr1,r5,8 121 cmpwi cr1,r5,8
120 addi r3,r3,32 122 addi r3,r3,32
121 sld r9,r9,r10 123 sld r9,r9,r10
@@ -167,4 +169,5 @@ _GLOBAL(memcpy)
1673: bf cr7*4+3,4f 1693: bf cr7*4+3,4f
168 lbz r0,0(r4) 170 lbz r0,0(r4)
169 stb r0,0(r3) 171 stb r0,0(r3)
1704: blr 1724: ld r3,48(r1) /* return dest pointer */
173 blr
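
The memcpy_64.S change above saves the incoming destination pointer (r3) on entry and reloads it before every return, because memcpy is defined to return its destination argument; the earlier beqlr exits returned with r3 already advanced into the buffer. A small illustrative C caller that depends on that return value (copy_and_terminate is a name invented for this sketch):

    #include <string.h>

    /* Chains on memcpy's return value, which must be the original dst. */
    static char *copy_and_terminate(char *dst, const char *src, size_t n)
    {
            char *end = (char *)memcpy(dst, src, n) + n;
            *end = '\0';
            return dst;
    }
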
diff --git a/arch/powerpc/platforms/83xx/mpc834x_itx.c b/arch/powerpc/platforms/83xx/mpc834x_itx.c
index d9675f9b976..969fbb6d8c4 100644
--- a/arch/powerpc/platforms/83xx/mpc834x_itx.c
+++ b/arch/powerpc/platforms/83xx/mpc834x_itx.c
@@ -46,26 +46,6 @@ unsigned long isa_io_base = 0;
46unsigned long isa_mem_base = 0; 46unsigned long isa_mem_base = 0;
47#endif 47#endif
48 48
49#ifdef CONFIG_PCI
50static int
51mpc83xx_map_irq(struct pci_dev *dev, unsigned char idsel, unsigned char pin)
52{
53 static char pci_irq_table[][4] =
54 /*
55 * PCI IDSEL/INTPIN->INTLINE
56 * A B C D
57 */
58 {
59 {PIRQB, PIRQC, PIRQD, PIRQA}, /* idsel 0x0e */
60 {PIRQA, PIRQB, PIRQC, PIRQD}, /* idsel 0x0f */
61 {PIRQC, PIRQD, PIRQA, PIRQB}, /* idsel 0x10 */
62 };
63
64 const long min_idsel = 0x0e, max_idsel = 0x10, irqs_per_slot = 4;
65 return PCI_IRQ_TABLE_LOOKUP;
66}
67#endif /* CONFIG_PCI */
68
69/* ************************************************************************ 49/* ************************************************************************
70 * 50 *
71 * Setup the architecture 51 * Setup the architecture
@@ -92,8 +72,6 @@ static void __init mpc834x_itx_setup_arch(void)
92 for (np = NULL; (np = of_find_node_by_type(np, "pci")) != NULL;) 72 for (np = NULL; (np = of_find_node_by_type(np, "pci")) != NULL;)
93 add_bridge(np); 73 add_bridge(np);
94 74
95 ppc_md.pci_swizzle = common_swizzle;
96 ppc_md.pci_map_irq = mpc83xx_map_irq;
97 ppc_md.pci_exclude_device = mpc83xx_exclude_device; 75 ppc_md.pci_exclude_device = mpc83xx_exclude_device;
98#endif 76#endif
99 77
@@ -106,25 +84,13 @@ static void __init mpc834x_itx_setup_arch(void)
106 84
107void __init mpc834x_itx_init_IRQ(void) 85void __init mpc834x_itx_init_IRQ(void)
108{ 86{
109 u8 senses[8] = { 87 struct device_node *np;
110 0, /* EXT 0 */ 88
111 IRQ_SENSE_LEVEL, /* EXT 1 */ 89 np = of_find_node_by_type(NULL, "ipic");
112 IRQ_SENSE_LEVEL, /* EXT 2 */ 90 if (!np)
113 0, /* EXT 3 */ 91 return;
114#ifdef CONFIG_PCI
115 IRQ_SENSE_LEVEL, /* EXT 4 */
116 IRQ_SENSE_LEVEL, /* EXT 5 */
117 IRQ_SENSE_LEVEL, /* EXT 6 */
118 IRQ_SENSE_LEVEL, /* EXT 7 */
119#else
120 0, /* EXT 4 */
121 0, /* EXT 5 */
122 0, /* EXT 6 */
123 0, /* EXT 7 */
124#endif
125 };
126 92
127 ipic_init(get_immrbase() + 0x00700, 0, 0, senses, 8); 93 ipic_init(np, 0);
128 94
129 /* Initialize the default interrupt mapping priorities, 95 /* Initialize the default interrupt mapping priorities,
130 * in case the boot rom changed something on us. 96 * in case the boot rom changed something on us.
@@ -153,4 +119,7 @@ define_machine(mpc834x_itx) {
153 .time_init = mpc83xx_time_init, 119 .time_init = mpc83xx_time_init,
154 .calibrate_decr = generic_calibrate_decr, 120 .calibrate_decr = generic_calibrate_decr,
155 .progress = udbg_progress, 121 .progress = udbg_progress,
122#ifdef CONFIG_PCI
123 .pcibios_fixup = mpc83xx_pcibios_fixup,
124#endif
156}; 125};
diff --git a/arch/powerpc/platforms/83xx/mpc834x_sys.c b/arch/powerpc/platforms/83xx/mpc834x_sys.c
index 5eadf9d035f..677196187a4 100644
--- a/arch/powerpc/platforms/83xx/mpc834x_sys.c
+++ b/arch/powerpc/platforms/83xx/mpc834x_sys.c
@@ -43,33 +43,6 @@ unsigned long isa_io_base = 0;
43unsigned long isa_mem_base = 0; 43unsigned long isa_mem_base = 0;
44#endif 44#endif
45 45
46#ifdef CONFIG_PCI
47static int
48mpc83xx_map_irq(struct pci_dev *dev, unsigned char idsel, unsigned char pin)
49{
50 static char pci_irq_table[][4] =
51 /*
52 * PCI IDSEL/INTPIN->INTLINE
53 * A B C D
54 */
55 {
56 {PIRQA, PIRQB, PIRQC, PIRQD}, /* idsel 0x11 */
57 {PIRQC, PIRQD, PIRQA, PIRQB}, /* idsel 0x12 */
58 {PIRQD, PIRQA, PIRQB, PIRQC}, /* idsel 0x13 */
59 {0, 0, 0, 0},
60 {PIRQA, PIRQB, PIRQC, PIRQD}, /* idsel 0x15 */
61 {PIRQD, PIRQA, PIRQB, PIRQC}, /* idsel 0x16 */
62 {PIRQC, PIRQD, PIRQA, PIRQB}, /* idsel 0x17 */
63 {PIRQB, PIRQC, PIRQD, PIRQA}, /* idsel 0x18 */
64 {0, 0, 0, 0}, /* idsel 0x19 */
65 {0, 0, 0, 0}, /* idsel 0x20 */
66 };
67
68 const long min_idsel = 0x11, max_idsel = 0x20, irqs_per_slot = 4;
69 return PCI_IRQ_TABLE_LOOKUP;
70}
71#endif /* CONFIG_PCI */
72
73/* ************************************************************************ 46/* ************************************************************************
74 * 47 *
75 * Setup the architecture 48 * Setup the architecture
@@ -96,8 +69,6 @@ static void __init mpc834x_sys_setup_arch(void)
96 for (np = NULL; (np = of_find_node_by_type(np, "pci")) != NULL;) 69 for (np = NULL; (np = of_find_node_by_type(np, "pci")) != NULL;)
97 add_bridge(np); 70 add_bridge(np);
98 71
99 ppc_md.pci_swizzle = common_swizzle;
100 ppc_md.pci_map_irq = mpc83xx_map_irq;
101 ppc_md.pci_exclude_device = mpc83xx_exclude_device; 72 ppc_md.pci_exclude_device = mpc83xx_exclude_device;
102#endif 73#endif
103 74
@@ -110,25 +81,13 @@ static void __init mpc834x_sys_setup_arch(void)
110 81
111void __init mpc834x_sys_init_IRQ(void) 82void __init mpc834x_sys_init_IRQ(void)
112{ 83{
113 u8 senses[8] = { 84 struct device_node *np;
114 0, /* EXT 0 */ 85
115 IRQ_SENSE_LEVEL, /* EXT 1 */ 86 np = of_find_node_by_type(NULL, "ipic");
116 IRQ_SENSE_LEVEL, /* EXT 2 */ 87 if (!np)
117 0, /* EXT 3 */ 88 return;
118#ifdef CONFIG_PCI
119 IRQ_SENSE_LEVEL, /* EXT 4 */
120 IRQ_SENSE_LEVEL, /* EXT 5 */
121 IRQ_SENSE_LEVEL, /* EXT 6 */
122 IRQ_SENSE_LEVEL, /* EXT 7 */
123#else
124 0, /* EXT 4 */
125 0, /* EXT 5 */
126 0, /* EXT 6 */
127 0, /* EXT 7 */
128#endif
129 };
130 89
131 ipic_init(get_immrbase() + 0x00700, 0, 0, senses, 8); 90 ipic_init(np, 0);
132 91
133 /* Initialize the default interrupt mapping priorities, 92 /* Initialize the default interrupt mapping priorities,
134 * in case the boot rom changed something on us. 93 * in case the boot rom changed something on us.
@@ -178,4 +137,7 @@ define_machine(mpc834x_sys) {
178 .time_init = mpc83xx_time_init, 137 .time_init = mpc83xx_time_init,
179 .calibrate_decr = generic_calibrate_decr, 138 .calibrate_decr = generic_calibrate_decr,
180 .progress = udbg_progress, 139 .progress = udbg_progress,
140#ifdef CONFIG_PCI
141 .pcibios_fixup = mpc83xx_pcibios_fixup,
142#endif
181}; 143};
diff --git a/arch/powerpc/platforms/83xx/mpc83xx.h b/arch/powerpc/platforms/83xx/mpc83xx.h
index 01cae106912..2c82bca9bfb 100644
--- a/arch/powerpc/platforms/83xx/mpc83xx.h
+++ b/arch/powerpc/platforms/83xx/mpc83xx.h
@@ -11,6 +11,7 @@
11 11
12extern int add_bridge(struct device_node *dev); 12extern int add_bridge(struct device_node *dev);
13extern int mpc83xx_exclude_device(u_char bus, u_char devfn); 13extern int mpc83xx_exclude_device(u_char bus, u_char devfn);
14extern void mpc83xx_pcibios_fixup(void);
14extern void mpc83xx_restart(char *cmd); 15extern void mpc83xx_restart(char *cmd);
15extern long mpc83xx_time_init(void); 16extern long mpc83xx_time_init(void);
16 17
diff --git a/arch/powerpc/platforms/83xx/pci.c b/arch/powerpc/platforms/83xx/pci.c
index 9c365055514..4557ac5255c 100644
--- a/arch/powerpc/platforms/83xx/pci.c
+++ b/arch/powerpc/platforms/83xx/pci.c
@@ -45,6 +45,15 @@ int mpc83xx_exclude_device(u_char bus, u_char devfn)
45 return PCIBIOS_SUCCESSFUL; 45 return PCIBIOS_SUCCESSFUL;
46} 46}
47 47
48void __init mpc83xx_pcibios_fixup(void)
49{
50 struct pci_dev *dev = NULL;
51
52 /* map all the PCI irqs */
53 for_each_pci_dev(dev)
54 pci_read_irq_line(dev);
55}
56
48int __init add_bridge(struct device_node *dev) 57int __init add_bridge(struct device_node *dev)
49{ 58{
50 int len; 59 int len;
diff --git a/arch/powerpc/platforms/embedded6xx/Kconfig b/arch/powerpc/platforms/embedded6xx/Kconfig
index ba07a9a7c03..234a861870a 100644
--- a/arch/powerpc/platforms/embedded6xx/Kconfig
+++ b/arch/powerpc/platforms/embedded6xx/Kconfig
@@ -80,6 +80,7 @@ config MPC7448HPC2
80 select DEFAULT_UIMAGE 80 select DEFAULT_UIMAGE
81 select PPC_UDBG_16550 81 select PPC_UDBG_16550
82 select MPIC 82 select MPIC
83 select MPIC_WEIRD
83 help 84 help
84 Select MPC7448HPC2 if configuring for Freescale MPC7448HPC2 (Taiga) 85 Select MPC7448HPC2 if configuring for Freescale MPC7448HPC2 (Taiga)
85 platform 86 platform
diff --git a/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c b/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c
index 673ee69c62e..e4f2b9df5e1 100644
--- a/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c
+++ b/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c
@@ -215,7 +215,7 @@ static void __init mpc7448_hpc2_init_IRQ(void)
215 215
216 mpic = mpic_alloc(tsi_pic, mpic_paddr, 216 mpic = mpic_alloc(tsi_pic, mpic_paddr,
217 MPIC_PRIMARY | MPIC_BIG_ENDIAN | MPIC_WANTS_RESET | 217 MPIC_PRIMARY | MPIC_BIG_ENDIAN | MPIC_WANTS_RESET |
218 MPIC_SPV_EOI | MPIC_MOD_ID(MPIC_ID_TSI108), 218 MPIC_SPV_EOI | MPIC_NO_PTHROU_DIS | MPIC_REGSET_TSI108,
219 0, /* num_sources used */ 219 0, /* num_sources used */
220 0, /* num_sources used */ 220 0, /* num_sources used */
221 "Tsi108_PIC"); 221 "Tsi108_PIC");
diff --git a/arch/powerpc/sysdev/Makefile b/arch/powerpc/sysdev/Makefile
index cebfae24260..e5e999ea891 100644
--- a/arch/powerpc/sysdev/Makefile
+++ b/arch/powerpc/sysdev/Makefile
@@ -9,11 +9,11 @@ obj-$(CONFIG_BOOKE) += dcr.o
9obj-$(CONFIG_40x) += dcr.o 9obj-$(CONFIG_40x) += dcr.o
10obj-$(CONFIG_U3_DART) += dart_iommu.o 10obj-$(CONFIG_U3_DART) += dart_iommu.o
11obj-$(CONFIG_MMIO_NVRAM) += mmio_nvram.o 11obj-$(CONFIG_MMIO_NVRAM) += mmio_nvram.o
12obj-$(CONFIG_PPC_83xx) += ipic.o
13obj-$(CONFIG_FSL_SOC) += fsl_soc.o 12obj-$(CONFIG_FSL_SOC) += fsl_soc.o
14obj-$(CONFIG_PPC_TODC) += todc.o 13obj-$(CONFIG_PPC_TODC) += todc.o
15obj-$(CONFIG_TSI108_BRIDGE) += tsi108_pci.o tsi108_dev.o 14obj-$(CONFIG_TSI108_BRIDGE) += tsi108_pci.o tsi108_dev.o
16 15
17ifeq ($(CONFIG_PPC_MERGE),y) 16ifeq ($(CONFIG_PPC_MERGE),y)
18obj-$(CONFIG_PPC_I8259) += i8259.o 17obj-$(CONFIG_PPC_I8259) += i8259.o
19 endif 18obj-$(CONFIG_PPC_83xx) += ipic.o
19endif
diff --git a/arch/powerpc/sysdev/ipic.c b/arch/powerpc/sysdev/ipic.c
index 46801f5ec03..70e707785d4 100644
--- a/arch/powerpc/sysdev/ipic.c
+++ b/arch/powerpc/sysdev/ipic.c
@@ -19,15 +19,18 @@
19#include <linux/sched.h> 19#include <linux/sched.h>
20#include <linux/signal.h> 20#include <linux/signal.h>
21#include <linux/sysdev.h> 21#include <linux/sysdev.h>
22#include <linux/device.h>
23#include <linux/bootmem.h>
24#include <linux/spinlock.h>
22#include <asm/irq.h> 25#include <asm/irq.h>
23#include <asm/io.h> 26#include <asm/io.h>
27#include <asm/prom.h>
24#include <asm/ipic.h> 28#include <asm/ipic.h>
25#include <asm/mpc83xx.h>
26 29
27#include "ipic.h" 30#include "ipic.h"
28 31
29static struct ipic p_ipic;
30static struct ipic * primary_ipic; 32static struct ipic * primary_ipic;
33static DEFINE_SPINLOCK(ipic_lock);
31 34
32static struct ipic_info ipic_info[] = { 35static struct ipic_info ipic_info[] = {
33 [9] = { 36 [9] = {
@@ -373,74 +376,220 @@ static inline void ipic_write(volatile u32 __iomem *base, unsigned int reg, u32
373 out_be32(base + (reg >> 2), value); 376 out_be32(base + (reg >> 2), value);
374} 377}
375 378
376static inline struct ipic * ipic_from_irq(unsigned int irq) 379static inline struct ipic * ipic_from_irq(unsigned int virq)
377{ 380{
378 return primary_ipic; 381 return primary_ipic;
379} 382}
380 383
381static void ipic_enable_irq(unsigned int irq) 384#define ipic_irq_to_hw(virq) ((unsigned int)irq_map[virq].hwirq)
385
386static void ipic_unmask_irq(unsigned int virq)
382{ 387{
383 struct ipic *ipic = ipic_from_irq(irq); 388 struct ipic *ipic = ipic_from_irq(virq);
384 unsigned int src = irq - ipic->irq_offset; 389 unsigned int src = ipic_irq_to_hw(virq);
390 unsigned long flags;
385 u32 temp; 391 u32 temp;
386 392
393 spin_lock_irqsave(&ipic_lock, flags);
394
387 temp = ipic_read(ipic->regs, ipic_info[src].mask); 395 temp = ipic_read(ipic->regs, ipic_info[src].mask);
388 temp |= (1 << (31 - ipic_info[src].bit)); 396 temp |= (1 << (31 - ipic_info[src].bit));
389 ipic_write(ipic->regs, ipic_info[src].mask, temp); 397 ipic_write(ipic->regs, ipic_info[src].mask, temp);
398
399 spin_unlock_irqrestore(&ipic_lock, flags);
390} 400}
391 401
392static void ipic_disable_irq(unsigned int irq) 402static void ipic_mask_irq(unsigned int virq)
393{ 403{
394 struct ipic *ipic = ipic_from_irq(irq); 404 struct ipic *ipic = ipic_from_irq(virq);
395 unsigned int src = irq - ipic->irq_offset; 405 unsigned int src = ipic_irq_to_hw(virq);
406 unsigned long flags;
396 u32 temp; 407 u32 temp;
397 408
409 spin_lock_irqsave(&ipic_lock, flags);
410
398 temp = ipic_read(ipic->regs, ipic_info[src].mask); 411 temp = ipic_read(ipic->regs, ipic_info[src].mask);
399 temp &= ~(1 << (31 - ipic_info[src].bit)); 412 temp &= ~(1 << (31 - ipic_info[src].bit));
400 ipic_write(ipic->regs, ipic_info[src].mask, temp); 413 ipic_write(ipic->regs, ipic_info[src].mask, temp);
414
415 spin_unlock_irqrestore(&ipic_lock, flags);
401} 416}
402 417
403static void ipic_disable_irq_and_ack(unsigned int irq) 418static void ipic_ack_irq(unsigned int virq)
404{ 419{
405 struct ipic *ipic = ipic_from_irq(irq); 420 struct ipic *ipic = ipic_from_irq(virq);
406 unsigned int src = irq - ipic->irq_offset; 421 unsigned int src = ipic_irq_to_hw(virq);
422 unsigned long flags;
407 u32 temp; 423 u32 temp;
408 424
409 ipic_disable_irq(irq); 425 spin_lock_irqsave(&ipic_lock, flags);
410 426
411 temp = ipic_read(ipic->regs, ipic_info[src].pend); 427 temp = ipic_read(ipic->regs, ipic_info[src].pend);
412 temp |= (1 << (31 - ipic_info[src].bit)); 428 temp |= (1 << (31 - ipic_info[src].bit));
413 ipic_write(ipic->regs, ipic_info[src].pend, temp); 429 ipic_write(ipic->regs, ipic_info[src].pend, temp);
430
431 spin_unlock_irqrestore(&ipic_lock, flags);
414} 432}
415 433
416static void ipic_end_irq(unsigned int irq) 434static void ipic_mask_irq_and_ack(unsigned int virq)
417{ 435{
418 if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS))) 436 struct ipic *ipic = ipic_from_irq(virq);
419 ipic_enable_irq(irq); 437 unsigned int src = ipic_irq_to_hw(virq);
438 unsigned long flags;
439 u32 temp;
440
441 spin_lock_irqsave(&ipic_lock, flags);
442
443 temp = ipic_read(ipic->regs, ipic_info[src].mask);
444 temp &= ~(1 << (31 - ipic_info[src].bit));
445 ipic_write(ipic->regs, ipic_info[src].mask, temp);
446
447 temp = ipic_read(ipic->regs, ipic_info[src].pend);
448 temp |= (1 << (31 - ipic_info[src].bit));
449 ipic_write(ipic->regs, ipic_info[src].pend, temp);
450
451 spin_unlock_irqrestore(&ipic_lock, flags);
420} 452}
421 453
422struct hw_interrupt_type ipic = { 454static int ipic_set_irq_type(unsigned int virq, unsigned int flow_type)
423 .typename = " IPIC ", 455{
424 .enable = ipic_enable_irq, 456 struct ipic *ipic = ipic_from_irq(virq);
425 .disable = ipic_disable_irq, 457 unsigned int src = ipic_irq_to_hw(virq);
426 .ack = ipic_disable_irq_and_ack, 458 struct irq_desc *desc = get_irq_desc(virq);
427 .end = ipic_end_irq, 459 unsigned int vold, vnew, edibit;
460
461 if (flow_type == IRQ_TYPE_NONE)
462 flow_type = IRQ_TYPE_LEVEL_LOW;
463
464 /* ipic supports only low assertion and high-to-low change senses
465 */
466 if (!(flow_type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_EDGE_FALLING))) {
467 printk(KERN_ERR "ipic: sense type 0x%x not supported\n",
468 flow_type);
469 return -EINVAL;
470 }
471
472 desc->status &= ~(IRQ_TYPE_SENSE_MASK | IRQ_LEVEL);
473 desc->status |= flow_type & IRQ_TYPE_SENSE_MASK;
474 if (flow_type & IRQ_TYPE_LEVEL_LOW) {
475 desc->status |= IRQ_LEVEL;
476 set_irq_handler(virq, handle_level_irq);
477 } else {
478 set_irq_handler(virq, handle_edge_irq);
479 }
480
481 /* only EXT IRQ senses are programmable on ipic
482 * internal IRQ senses are LEVEL_LOW
483 */
484 if (src == IPIC_IRQ_EXT0)
485 edibit = 15;
486 else
487 if (src >= IPIC_IRQ_EXT1 && src <= IPIC_IRQ_EXT7)
488 edibit = (14 - (src - IPIC_IRQ_EXT1));
489 else
490 return (flow_type & IRQ_TYPE_LEVEL_LOW) ? 0 : -EINVAL;
491
492 vold = ipic_read(ipic->regs, IPIC_SECNR);
493 if ((flow_type & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_FALLING) {
494 vnew = vold | (1 << edibit);
495 } else {
496 vnew = vold & ~(1 << edibit);
497 }
498 if (vold != vnew)
499 ipic_write(ipic->regs, IPIC_SECNR, vnew);
500 return 0;
501}
502
503static struct irq_chip ipic_irq_chip = {
504 .typename = " IPIC ",
505 .unmask = ipic_unmask_irq,
506 .mask = ipic_mask_irq,
507 .mask_ack = ipic_mask_irq_and_ack,
508 .ack = ipic_ack_irq,
509 .set_type = ipic_set_irq_type,
510};
511
512static int ipic_host_match(struct irq_host *h, struct device_node *node)
513{
514 struct ipic *ipic = h->host_data;
515
516 /* Exact match, unless ipic node is NULL */
517 return ipic->of_node == NULL || ipic->of_node == node;
518}
519
520static int ipic_host_map(struct irq_host *h, unsigned int virq,
521 irq_hw_number_t hw)
522{
523 struct ipic *ipic = h->host_data;
524 struct irq_chip *chip;
525
526 /* Default chip */
527 chip = &ipic->hc_irq;
528
529 set_irq_chip_data(virq, ipic);
530 set_irq_chip_and_handler(virq, chip, handle_level_irq);
531
532 /* Set default irq type */
533 set_irq_type(virq, IRQ_TYPE_NONE);
534
535 return 0;
536}
537
538static int ipic_host_xlate(struct irq_host *h, struct device_node *ct,
539 u32 *intspec, unsigned int intsize,
540 irq_hw_number_t *out_hwirq, unsigned int *out_flags)
541
542{
543 /* interrupt sense values coming from the device tree equal either
544 * LEVEL_LOW (low assertion) or EDGE_FALLING (high-to-low change)
545 */
546 *out_hwirq = intspec[0];
547 if (intsize > 1)
548 *out_flags = intspec[1];
549 else
550 *out_flags = IRQ_TYPE_NONE;
551 return 0;
552}
553
554static struct irq_host_ops ipic_host_ops = {
555 .match = ipic_host_match,
556 .map = ipic_host_map,
557 .xlate = ipic_host_xlate,
428}; 558};
429 559
430void __init ipic_init(phys_addr_t phys_addr, 560void __init ipic_init(struct device_node *node,
431 unsigned int flags, 561 unsigned int flags)
432 unsigned int irq_offset,
433 unsigned char *senses,
434 unsigned int senses_count)
435{ 562{
436 u32 i, temp = 0; 563 struct ipic *ipic;
564 struct resource res;
565 u32 temp = 0, ret;
566
567 ipic = alloc_bootmem(sizeof(struct ipic));
568 if (ipic == NULL)
569 return;
570
571 memset(ipic, 0, sizeof(struct ipic));
572 ipic->of_node = node ? of_node_get(node) : NULL;
573
574 ipic->irqhost = irq_alloc_host(IRQ_HOST_MAP_LINEAR,
575 NR_IPIC_INTS,
576 &ipic_host_ops, 0);
577 if (ipic->irqhost == NULL) {
578 of_node_put(node);
579 return;
580 }
581
582 ret = of_address_to_resource(node, 0, &res);
583 if (ret)
584 return;
437 585
438 primary_ipic = &p_ipic; 586 ipic->regs = ioremap(res.start, res.end - res.start + 1);
439 primary_ipic->regs = ioremap(phys_addr, MPC83xx_IPIC_SIZE);
440 587
441 primary_ipic->irq_offset = irq_offset; 588 ipic->irqhost->host_data = ipic;
589 ipic->hc_irq = ipic_irq_chip;
442 590
443 ipic_write(primary_ipic->regs, IPIC_SICNR, 0x0); 591 /* init hw */
592 ipic_write(ipic->regs, IPIC_SICNR, 0x0);
444 593
445 /* default priority scheme is grouped. If spread mode is required 594 /* default priority scheme is grouped. If spread mode is required
446 * configure SICFR accordingly */ 595 * configure SICFR accordingly */
@@ -453,49 +602,35 @@ void __init ipic_init(phys_addr_t phys_addr,
453 if (flags & IPIC_SPREADMODE_MIX_B) 602 if (flags & IPIC_SPREADMODE_MIX_B)
454 temp |= SICFR_MPSB; 603 temp |= SICFR_MPSB;
455 604
456 ipic_write(primary_ipic->regs, IPIC_SICNR, temp); 605 ipic_write(ipic->regs, IPIC_SICNR, temp);
457 606
458 /* handle MCP route */ 607 /* handle MCP route */
459 temp = 0; 608 temp = 0;
460 if (flags & IPIC_DISABLE_MCP_OUT) 609 if (flags & IPIC_DISABLE_MCP_OUT)
461 temp = SERCR_MCPR; 610 temp = SERCR_MCPR;
462 ipic_write(primary_ipic->regs, IPIC_SERCR, temp); 611 ipic_write(ipic->regs, IPIC_SERCR, temp);
463 612
464 /* handle routing of IRQ0 to MCP */ 613 /* handle routing of IRQ0 to MCP */
465 temp = ipic_read(primary_ipic->regs, IPIC_SEMSR); 614 temp = ipic_read(ipic->regs, IPIC_SEMSR);
466 615
467 if (flags & IPIC_IRQ0_MCP) 616 if (flags & IPIC_IRQ0_MCP)
468 temp |= SEMSR_SIRQ0; 617 temp |= SEMSR_SIRQ0;
469 else 618 else
470 temp &= ~SEMSR_SIRQ0; 619 temp &= ~SEMSR_SIRQ0;
471 620
472 ipic_write(primary_ipic->regs, IPIC_SEMSR, temp); 621 ipic_write(ipic->regs, IPIC_SEMSR, temp);
473 622
474 for (i = 0 ; i < NR_IPIC_INTS ; i++) { 623 primary_ipic = ipic;
475 irq_desc[i+irq_offset].chip = &ipic; 624 irq_set_default_host(primary_ipic->irqhost);
476 irq_desc[i+irq_offset].status = IRQ_LEVEL;
477 }
478 625
479 temp = 0; 626 printk ("IPIC (%d IRQ sources) at %p\n", NR_IPIC_INTS,
480 for (i = 0 ; i < senses_count ; i++) { 627 primary_ipic->regs);
481 if ((senses[i] & IRQ_SENSE_MASK) == IRQ_SENSE_EDGE) {
482 temp |= 1 << (15 - i);
483 if (i != 0)
484 irq_desc[i + irq_offset + MPC83xx_IRQ_EXT1 - 1].status = 0;
485 else
486 irq_desc[irq_offset + MPC83xx_IRQ_EXT0].status = 0;
487 }
488 }
489 ipic_write(primary_ipic->regs, IPIC_SECNR, temp);
490
491 printk ("IPIC (%d IRQ sources, %d External IRQs) at %p\n", NR_IPIC_INTS,
492 senses_count, primary_ipic->regs);
493} 628}
494 629
495int ipic_set_priority(unsigned int irq, unsigned int priority) 630int ipic_set_priority(unsigned int virq, unsigned int priority)
496{ 631{
497 struct ipic *ipic = ipic_from_irq(irq); 632 struct ipic *ipic = ipic_from_irq(virq);
498 unsigned int src = irq - ipic->irq_offset; 633 unsigned int src = ipic_irq_to_hw(virq);
499 u32 temp; 634 u32 temp;
500 635
501 if (priority > 7) 636 if (priority > 7)
@@ -520,10 +655,10 @@ int ipic_set_priority(unsigned int irq, unsigned int priority)
520 return 0; 655 return 0;
521} 656}
522 657
523void ipic_set_highest_priority(unsigned int irq) 658void ipic_set_highest_priority(unsigned int virq)
524{ 659{
525 struct ipic *ipic = ipic_from_irq(irq); 660 struct ipic *ipic = ipic_from_irq(virq);
526 unsigned int src = irq - ipic->irq_offset; 661 unsigned int src = ipic_irq_to_hw(virq);
527 u32 temp; 662 u32 temp;
528 663
529 temp = ipic_read(ipic->regs, IPIC_SICFR); 664 temp = ipic_read(ipic->regs, IPIC_SICFR);
@@ -537,37 +672,10 @@ void ipic_set_highest_priority(unsigned int irq)
537 672
538void ipic_set_default_priority(void) 673void ipic_set_default_priority(void)
539{ 674{
540 ipic_set_priority(MPC83xx_IRQ_TSEC1_TX, 0); 675 ipic_write(primary_ipic->regs, IPIC_SIPRR_A, IPIC_SIPRR_A_DEFAULT);
541 ipic_set_priority(MPC83xx_IRQ_TSEC1_RX, 1); 676 ipic_write(primary_ipic->regs, IPIC_SIPRR_D, IPIC_SIPRR_D_DEFAULT);
542 ipic_set_priority(MPC83xx_IRQ_TSEC1_ERROR, 2); 677 ipic_write(primary_ipic->regs, IPIC_SMPRR_A, IPIC_SMPRR_A_DEFAULT);
543 ipic_set_priority(MPC83xx_IRQ_TSEC2_TX, 3); 678 ipic_write(primary_ipic->regs, IPIC_SMPRR_B, IPIC_SMPRR_B_DEFAULT);
544 ipic_set_priority(MPC83xx_IRQ_TSEC2_RX, 4);
545 ipic_set_priority(MPC83xx_IRQ_TSEC2_ERROR, 5);
546 ipic_set_priority(MPC83xx_IRQ_USB2_DR, 6);
547 ipic_set_priority(MPC83xx_IRQ_USB2_MPH, 7);
548
549 ipic_set_priority(MPC83xx_IRQ_UART1, 0);
550 ipic_set_priority(MPC83xx_IRQ_UART2, 1);
551 ipic_set_priority(MPC83xx_IRQ_SEC2, 2);
552 ipic_set_priority(MPC83xx_IRQ_IIC1, 5);
553 ipic_set_priority(MPC83xx_IRQ_IIC2, 6);
554 ipic_set_priority(MPC83xx_IRQ_SPI, 7);
555 ipic_set_priority(MPC83xx_IRQ_RTC_SEC, 0);
556 ipic_set_priority(MPC83xx_IRQ_PIT, 1);
557 ipic_set_priority(MPC83xx_IRQ_PCI1, 2);
558 ipic_set_priority(MPC83xx_IRQ_PCI2, 3);
559 ipic_set_priority(MPC83xx_IRQ_EXT0, 4);
560 ipic_set_priority(MPC83xx_IRQ_EXT1, 5);
561 ipic_set_priority(MPC83xx_IRQ_EXT2, 6);
562 ipic_set_priority(MPC83xx_IRQ_EXT3, 7);
563 ipic_set_priority(MPC83xx_IRQ_RTC_ALR, 0);
564 ipic_set_priority(MPC83xx_IRQ_MU, 1);
565 ipic_set_priority(MPC83xx_IRQ_SBA, 2);
566 ipic_set_priority(MPC83xx_IRQ_DMA, 3);
567 ipic_set_priority(MPC83xx_IRQ_EXT4, 4);
568 ipic_set_priority(MPC83xx_IRQ_EXT5, 5);
569 ipic_set_priority(MPC83xx_IRQ_EXT6, 6);
570 ipic_set_priority(MPC83xx_IRQ_EXT7, 7);
571} 679}
572 680
573void ipic_enable_mcp(enum ipic_mcp_irq mcp_irq) 681void ipic_enable_mcp(enum ipic_mcp_irq mcp_irq)
@@ -600,17 +708,20 @@ void ipic_clear_mcp_status(u32 mask)
600 ipic_write(primary_ipic->regs, IPIC_SERMR, mask); 708 ipic_write(primary_ipic->regs, IPIC_SERMR, mask);
601} 709}
602 710
603/* Return an interrupt vector or -1 if no interrupt is pending. */ 711/* Return an interrupt vector or NO_IRQ if no interrupt is pending. */
604int ipic_get_irq(struct pt_regs *regs) 712unsigned int ipic_get_irq(struct pt_regs *regs)
605{ 713{
606 int irq; 714 int irq;
607 715
608 irq = ipic_read(primary_ipic->regs, IPIC_SIVCR) & 0x7f; 716 BUG_ON(primary_ipic == NULL);
717
718#define IPIC_SIVCR_VECTOR_MASK 0x7f
719 irq = ipic_read(primary_ipic->regs, IPIC_SIVCR) & IPIC_SIVCR_VECTOR_MASK;
609 720
610 if (irq == 0) /* 0 --> no irq is pending */ 721 if (irq == 0) /* 0 --> no irq is pending */
611 irq = -1; 722 return NO_IRQ;
612 723
613 return irq; 724 return irq_linear_revmap(primary_ipic->irqhost, irq);
614} 725}
615 726
616static struct sysdev_class ipic_sysclass = { 727static struct sysdev_class ipic_sysclass = {
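The ipic.c hunks above move the driver from a fixed irq_offset scheme to an irq_host with a linear reverse map: ipic_init() now takes the controller's device_node, allocates the host with irq_alloc_host(IRQ_HOST_MAP_LINEAR, ...), and ipic_get_irq() masks the 7-bit vector field out of SIVCR and translates it to a Linux virq with irq_linear_revmap(). A minimal self-contained sketch of that lookup step, using stand-in names rather than the real kernel API:

    #include <stdio.h>

    #define SIVCR_VECTOR_MASK 0x7f   /* low 7 bits of SIVCR hold the pending vector */
    #define NO_IRQ            0      /* "nothing pending" sentinel */
    #define NR_SRCS           128

    /* stand-in for the linear reverse map: hardware vector -> virtual irq */
    static unsigned int revmap[NR_SRCS];

    static unsigned int get_irq(unsigned int sivcr)
    {
        unsigned int hw = sivcr & SIVCR_VECTOR_MASK;

        if (hw == 0)                 /* vector 0 means no interrupt is pending */
            return NO_IRQ;
        return revmap[hw];           /* plays the role of irq_linear_revmap() */
    }

    int main(void)
    {
        revmap[17] = 33;             /* pretend hw vector 17 was mapped to virq 33 */
        printf("%u %u\n", get_irq(0x11), get_irq(0x00));   /* prints "33 0" */
        return 0;
    }
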
diff --git a/arch/powerpc/sysdev/ipic.h b/arch/powerpc/sysdev/ipic.h
index a60c9d18bb7..c28e589877e 100644
--- a/arch/powerpc/sysdev/ipic.h
+++ b/arch/powerpc/sysdev/ipic.h
@@ -15,7 +15,18 @@
15 15
16#include <asm/ipic.h> 16#include <asm/ipic.h>
17 17
18#define MPC83xx_IPIC_SIZE (0x00100) 18#define NR_IPIC_INTS 128
19
20/* External IRQS */
21#define IPIC_IRQ_EXT0 48
22#define IPIC_IRQ_EXT1 17
23#define IPIC_IRQ_EXT7 23
24
25/* Default Priority Registers */
26#define IPIC_SIPRR_A_DEFAULT 0x05309770
27#define IPIC_SIPRR_D_DEFAULT 0x05309770
28#define IPIC_SMPRR_A_DEFAULT 0x05309770
29#define IPIC_SMPRR_B_DEFAULT 0x05309770
19 30
20/* System Global Interrupt Configuration Register */ 31/* System Global Interrupt Configuration Register */
21#define SICFR_IPSA 0x00010000 32#define SICFR_IPSA 0x00010000
@@ -31,7 +42,15 @@
31 42
32struct ipic { 43struct ipic {
33 volatile u32 __iomem *regs; 44 volatile u32 __iomem *regs;
34 unsigned int irq_offset; 45
46 /* The remapper for this IPIC */
47 struct irq_host *irqhost;
48
49 /* The "linux" controller struct */
50 struct irq_chip hc_irq;
51
52 /* The device node of the interrupt controller */
53 struct device_node *of_node;
35}; 54};
36 55
37struct ipic_info { 56struct ipic_info {
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
index 6e0281afa6c..b604926401f 100644
--- a/arch/powerpc/sysdev/mpic.c
+++ b/arch/powerpc/sysdev/mpic.c
@@ -54,6 +54,94 @@ static DEFINE_SPINLOCK(mpic_lock);
54#endif 54#endif
55#endif 55#endif
56 56
57#ifdef CONFIG_MPIC_WEIRD
58static u32 mpic_infos[][MPIC_IDX_END] = {
59 [0] = { /* Original OpenPIC compatible MPIC */
60 MPIC_GREG_BASE,
61 MPIC_GREG_FEATURE_0,
62 MPIC_GREG_GLOBAL_CONF_0,
63 MPIC_GREG_VENDOR_ID,
64 MPIC_GREG_IPI_VECTOR_PRI_0,
65 MPIC_GREG_IPI_STRIDE,
66 MPIC_GREG_SPURIOUS,
67 MPIC_GREG_TIMER_FREQ,
68
69 MPIC_TIMER_BASE,
70 MPIC_TIMER_STRIDE,
71 MPIC_TIMER_CURRENT_CNT,
72 MPIC_TIMER_BASE_CNT,
73 MPIC_TIMER_VECTOR_PRI,
74 MPIC_TIMER_DESTINATION,
75
76 MPIC_CPU_BASE,
77 MPIC_CPU_STRIDE,
78 MPIC_CPU_IPI_DISPATCH_0,
79 MPIC_CPU_IPI_DISPATCH_STRIDE,
80 MPIC_CPU_CURRENT_TASK_PRI,
81 MPIC_CPU_WHOAMI,
82 MPIC_CPU_INTACK,
83 MPIC_CPU_EOI,
84
85 MPIC_IRQ_BASE,
86 MPIC_IRQ_STRIDE,
87 MPIC_IRQ_VECTOR_PRI,
88 MPIC_VECPRI_VECTOR_MASK,
89 MPIC_VECPRI_POLARITY_POSITIVE,
90 MPIC_VECPRI_POLARITY_NEGATIVE,
91 MPIC_VECPRI_SENSE_LEVEL,
92 MPIC_VECPRI_SENSE_EDGE,
93 MPIC_VECPRI_POLARITY_MASK,
94 MPIC_VECPRI_SENSE_MASK,
95 MPIC_IRQ_DESTINATION
96 },
97 [1] = { /* Tsi108/109 PIC */
98 TSI108_GREG_BASE,
99 TSI108_GREG_FEATURE_0,
100 TSI108_GREG_GLOBAL_CONF_0,
101 TSI108_GREG_VENDOR_ID,
102 TSI108_GREG_IPI_VECTOR_PRI_0,
103 TSI108_GREG_IPI_STRIDE,
104 TSI108_GREG_SPURIOUS,
105 TSI108_GREG_TIMER_FREQ,
106
107 TSI108_TIMER_BASE,
108 TSI108_TIMER_STRIDE,
109 TSI108_TIMER_CURRENT_CNT,
110 TSI108_TIMER_BASE_CNT,
111 TSI108_TIMER_VECTOR_PRI,
112 TSI108_TIMER_DESTINATION,
113
114 TSI108_CPU_BASE,
115 TSI108_CPU_STRIDE,
116 TSI108_CPU_IPI_DISPATCH_0,
117 TSI108_CPU_IPI_DISPATCH_STRIDE,
118 TSI108_CPU_CURRENT_TASK_PRI,
119 TSI108_CPU_WHOAMI,
120 TSI108_CPU_INTACK,
121 TSI108_CPU_EOI,
122
123 TSI108_IRQ_BASE,
124 TSI108_IRQ_STRIDE,
125 TSI108_IRQ_VECTOR_PRI,
126 TSI108_VECPRI_VECTOR_MASK,
127 TSI108_VECPRI_POLARITY_POSITIVE,
128 TSI108_VECPRI_POLARITY_NEGATIVE,
129 TSI108_VECPRI_SENSE_LEVEL,
130 TSI108_VECPRI_SENSE_EDGE,
131 TSI108_VECPRI_POLARITY_MASK,
132 TSI108_VECPRI_SENSE_MASK,
133 TSI108_IRQ_DESTINATION
134 },
135};
136
137#define MPIC_INFO(name) mpic->hw_set[MPIC_IDX_##name]
138
139#else /* CONFIG_MPIC_WEIRD */
140
141#define MPIC_INFO(name) MPIC_##name
142
143#endif /* CONFIG_MPIC_WEIRD */
144
57/* 145/*
58 * Register accessor functions 146 * Register accessor functions
59 */ 147 */
@@ -80,7 +168,8 @@ static inline void _mpic_write(unsigned int be, volatile u32 __iomem *base,
80static inline u32 _mpic_ipi_read(struct mpic *mpic, unsigned int ipi) 168static inline u32 _mpic_ipi_read(struct mpic *mpic, unsigned int ipi)
81{ 169{
82 unsigned int be = (mpic->flags & MPIC_BIG_ENDIAN) != 0; 170 unsigned int be = (mpic->flags & MPIC_BIG_ENDIAN) != 0;
83 unsigned int offset = MPIC_GREG_IPI_VECTOR_PRI_0 + (ipi * 0x10); 171 unsigned int offset = MPIC_INFO(GREG_IPI_VECTOR_PRI_0) +
172 (ipi * MPIC_INFO(GREG_IPI_STRIDE));
84 173
85 if (mpic->flags & MPIC_BROKEN_IPI) 174 if (mpic->flags & MPIC_BROKEN_IPI)
86 be = !be; 175 be = !be;
@@ -89,7 +178,8 @@ static inline u32 _mpic_ipi_read(struct mpic *mpic, unsigned int ipi)
89 178
90static inline void _mpic_ipi_write(struct mpic *mpic, unsigned int ipi, u32 value) 179static inline void _mpic_ipi_write(struct mpic *mpic, unsigned int ipi, u32 value)
91{ 180{
92 unsigned int offset = MPIC_GREG_IPI_VECTOR_PRI_0 + (ipi * 0x10); 181 unsigned int offset = MPIC_INFO(GREG_IPI_VECTOR_PRI_0) +
182 (ipi * MPIC_INFO(GREG_IPI_STRIDE));
93 183
94 _mpic_write(mpic->flags & MPIC_BIG_ENDIAN, mpic->gregs, offset, value); 184 _mpic_write(mpic->flags & MPIC_BIG_ENDIAN, mpic->gregs, offset, value);
95} 185}
@@ -120,7 +210,7 @@ static inline u32 _mpic_irq_read(struct mpic *mpic, unsigned int src_no, unsigne
120 unsigned int idx = src_no & mpic->isu_mask; 210 unsigned int idx = src_no & mpic->isu_mask;
121 211
122 return _mpic_read(mpic->flags & MPIC_BIG_ENDIAN, mpic->isus[isu], 212 return _mpic_read(mpic->flags & MPIC_BIG_ENDIAN, mpic->isus[isu],
123 reg + (idx * MPIC_IRQ_STRIDE)); 213 reg + (idx * MPIC_INFO(IRQ_STRIDE)));
124} 214}
125 215
126static inline void _mpic_irq_write(struct mpic *mpic, unsigned int src_no, 216static inline void _mpic_irq_write(struct mpic *mpic, unsigned int src_no,
@@ -130,7 +220,7 @@ static inline void _mpic_irq_write(struct mpic *mpic, unsigned int src_no,
130 unsigned int idx = src_no & mpic->isu_mask; 220 unsigned int idx = src_no & mpic->isu_mask;
131 221
132 _mpic_write(mpic->flags & MPIC_BIG_ENDIAN, mpic->isus[isu], 222 _mpic_write(mpic->flags & MPIC_BIG_ENDIAN, mpic->isus[isu],
133 reg + (idx * MPIC_IRQ_STRIDE), value); 223 reg + (idx * MPIC_INFO(IRQ_STRIDE)), value);
134} 224}
135 225
136#define mpic_read(b,r) _mpic_read(mpic->flags & MPIC_BIG_ENDIAN,(b),(r)) 226#define mpic_read(b,r) _mpic_read(mpic->flags & MPIC_BIG_ENDIAN,(b),(r))
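The mpic_infos[]/MPIC_INFO() machinery introduced above lets one driver serve both the classic OpenPIC register layout and the Tsi108/109 variant: with CONFIG_MPIC_WEIRD the offsets and vecpri bit definitions come from a per-controller row (mpic->hw_set) chosen at mpic_alloc() time, and without it the macro collapses back to the original MPIC_* constants so the common case costs nothing. A small self-contained sketch of the same table-indirection pattern, with an invented, much shorter field list and made-up offsets:

    #include <stdio.h>

    /* symbolic indices into the per-variant table, like MPIC_IDX_* */
    enum { IDX_IRQ_BASE, IDX_IRQ_STRIDE, IDX_CPU_BASE, IDX_END };

    /* one row per controller flavour; all values here are illustrative only */
    static const unsigned int hw_info[][IDX_END] = {
        { 0x10000, 0x20, 0x20000 },   /* "classic" layout */
        { 0x00100, 0x20, 0x08000 },   /* "weird" variant  */
    };

    struct pic {
        const unsigned int *hw_set;   /* points at the row picked at init time */
    };

    #define PIC_INFO(p, name) ((p)->hw_set[IDX_##name])

    int main(void)
    {
        struct pic p = { .hw_set = hw_info[1] };

        /* every access goes through the table, not compile-time constants */
        printf("source 5 register block at offset 0x%x\n",
               PIC_INFO(&p, IRQ_BASE) + 5 * PIC_INFO(&p, IRQ_STRIDE));
        return 0;
    }
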
@@ -156,8 +246,8 @@ static void __init mpic_test_broken_ipi(struct mpic *mpic)
156{ 246{
157 u32 r; 247 u32 r;
158 248
159 mpic_write(mpic->gregs, MPIC_GREG_IPI_VECTOR_PRI_0, MPIC_VECPRI_MASK); 249 mpic_write(mpic->gregs, MPIC_INFO(GREG_IPI_VECTOR_PRI_0), MPIC_VECPRI_MASK);
160 r = mpic_read(mpic->gregs, MPIC_GREG_IPI_VECTOR_PRI_0); 250 r = mpic_read(mpic->gregs, MPIC_INFO(GREG_IPI_VECTOR_PRI_0));
161 251
162 if (r == le32_to_cpu(MPIC_VECPRI_MASK)) { 252 if (r == le32_to_cpu(MPIC_VECPRI_MASK)) {
163 printk(KERN_INFO "mpic: Detected reversed IPI registers\n"); 253 printk(KERN_INFO "mpic: Detected reversed IPI registers\n");
@@ -394,8 +484,8 @@ static inline struct mpic * mpic_from_irq(unsigned int irq)
394/* Send an EOI */ 484/* Send an EOI */
395static inline void mpic_eoi(struct mpic *mpic) 485static inline void mpic_eoi(struct mpic *mpic)
396{ 486{
397 mpic_cpu_write(MPIC_CPU_EOI, 0); 487 mpic_cpu_write(MPIC_INFO(CPU_EOI), 0);
398 (void)mpic_cpu_read(MPIC_CPU_WHOAMI); 488 (void)mpic_cpu_read(MPIC_INFO(CPU_WHOAMI));
399} 489}
400 490
401#ifdef CONFIG_SMP 491#ifdef CONFIG_SMP
@@ -419,8 +509,8 @@ static void mpic_unmask_irq(unsigned int irq)
419 509
420 DBG("%p: %s: enable_irq: %d (src %d)\n", mpic, mpic->name, irq, src); 510 DBG("%p: %s: enable_irq: %d (src %d)\n", mpic, mpic->name, irq, src);
421 511
422 mpic_irq_write(src, MPIC_IRQ_VECTOR_PRI, 512 mpic_irq_write(src, MPIC_INFO(IRQ_VECTOR_PRI),
423 mpic_irq_read(src, MPIC_IRQ_VECTOR_PRI) & 513 mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI)) &
424 ~MPIC_VECPRI_MASK); 514 ~MPIC_VECPRI_MASK);
425 /* make sure mask gets to controller before we return to user */ 515 /* make sure mask gets to controller before we return to user */
426 do { 516 do {
@@ -428,7 +518,7 @@ static void mpic_unmask_irq(unsigned int irq)
428 printk(KERN_ERR "mpic_enable_irq timeout\n"); 518 printk(KERN_ERR "mpic_enable_irq timeout\n");
429 break; 519 break;
430 } 520 }
431 } while(mpic_irq_read(src, MPIC_IRQ_VECTOR_PRI) & MPIC_VECPRI_MASK); 521 } while(mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI)) & MPIC_VECPRI_MASK);
432} 522}
433 523
434static void mpic_mask_irq(unsigned int irq) 524static void mpic_mask_irq(unsigned int irq)
@@ -439,8 +529,8 @@ static void mpic_mask_irq(unsigned int irq)
439 529
440 DBG("%s: disable_irq: %d (src %d)\n", mpic->name, irq, src); 530 DBG("%s: disable_irq: %d (src %d)\n", mpic->name, irq, src);
441 531
442 mpic_irq_write(src, MPIC_IRQ_VECTOR_PRI, 532 mpic_irq_write(src, MPIC_INFO(IRQ_VECTOR_PRI),
443 mpic_irq_read(src, MPIC_IRQ_VECTOR_PRI) | 533 mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI)) |
444 MPIC_VECPRI_MASK); 534 MPIC_VECPRI_MASK);
445 535
446 /* make sure mask gets to controller before we return to user */ 536 /* make sure mask gets to controller before we return to user */
@@ -449,7 +539,7 @@ static void mpic_mask_irq(unsigned int irq)
449 printk(KERN_ERR "mpic_enable_irq timeout\n"); 539 printk(KERN_ERR "mpic_enable_irq timeout\n");
450 break; 540 break;
451 } 541 }
452 } while(!(mpic_irq_read(src, MPIC_IRQ_VECTOR_PRI) & MPIC_VECPRI_MASK)); 542 } while(!(mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI)) & MPIC_VECPRI_MASK));
453} 543}
454 544
455static void mpic_end_irq(unsigned int irq) 545static void mpic_end_irq(unsigned int irq)
@@ -560,24 +650,28 @@ static void mpic_set_affinity(unsigned int irq, cpumask_t cpumask)
560 650
561 cpus_and(tmp, cpumask, cpu_online_map); 651 cpus_and(tmp, cpumask, cpu_online_map);
562 652
563 mpic_irq_write(src, MPIC_IRQ_DESTINATION, 653 mpic_irq_write(src, MPIC_INFO(IRQ_DESTINATION),
564 mpic_physmask(cpus_addr(tmp)[0])); 654 mpic_physmask(cpus_addr(tmp)[0]));
565} 655}
566 656
567static unsigned int mpic_type_to_vecpri(unsigned int type) 657static unsigned int mpic_type_to_vecpri(struct mpic *mpic, unsigned int type)
568{ 658{
569 /* Now convert sense value */ 659 /* Now convert sense value */
570 switch(type & IRQ_TYPE_SENSE_MASK) { 660 switch(type & IRQ_TYPE_SENSE_MASK) {
571 case IRQ_TYPE_EDGE_RISING: 661 case IRQ_TYPE_EDGE_RISING:
572 return MPIC_VECPRI_SENSE_EDGE | MPIC_VECPRI_POLARITY_POSITIVE; 662 return MPIC_INFO(VECPRI_SENSE_EDGE) |
663 MPIC_INFO(VECPRI_POLARITY_POSITIVE);
573 case IRQ_TYPE_EDGE_FALLING: 664 case IRQ_TYPE_EDGE_FALLING:
574 case IRQ_TYPE_EDGE_BOTH: 665 case IRQ_TYPE_EDGE_BOTH:
575 return MPIC_VECPRI_SENSE_EDGE | MPIC_VECPRI_POLARITY_NEGATIVE; 666 return MPIC_INFO(VECPRI_SENSE_EDGE) |
667 MPIC_INFO(VECPRI_POLARITY_NEGATIVE);
576 case IRQ_TYPE_LEVEL_HIGH: 668 case IRQ_TYPE_LEVEL_HIGH:
577 return MPIC_VECPRI_SENSE_LEVEL | MPIC_VECPRI_POLARITY_POSITIVE; 669 return MPIC_INFO(VECPRI_SENSE_LEVEL) |
670 MPIC_INFO(VECPRI_POLARITY_POSITIVE);
578 case IRQ_TYPE_LEVEL_LOW: 671 case IRQ_TYPE_LEVEL_LOW:
579 default: 672 default:
580 return MPIC_VECPRI_SENSE_LEVEL | MPIC_VECPRI_POLARITY_NEGATIVE; 673 return MPIC_INFO(VECPRI_SENSE_LEVEL) |
674 MPIC_INFO(VECPRI_POLARITY_NEGATIVE);
581 } 675 }
582} 676}
583 677
@@ -609,13 +703,14 @@ static int mpic_set_irq_type(unsigned int virq, unsigned int flow_type)
609 vecpri = MPIC_VECPRI_POLARITY_POSITIVE | 703 vecpri = MPIC_VECPRI_POLARITY_POSITIVE |
610 MPIC_VECPRI_SENSE_EDGE; 704 MPIC_VECPRI_SENSE_EDGE;
611 else 705 else
612 vecpri = mpic_type_to_vecpri(flow_type); 706 vecpri = mpic_type_to_vecpri(mpic, flow_type);
613 707
614 vold = mpic_irq_read(src, MPIC_IRQ_VECTOR_PRI); 708 vold = mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI));
615 vnew = vold & ~(MPIC_VECPRI_POLARITY_MASK | MPIC_VECPRI_SENSE_MASK); 709 vnew = vold & ~(MPIC_INFO(VECPRI_POLARITY_MASK) |
710 MPIC_INFO(VECPRI_SENSE_MASK));
616 vnew |= vecpri; 711 vnew |= vecpri;
617 if (vold != vnew) 712 if (vold != vnew)
618 mpic_irq_write(src, MPIC_IRQ_VECTOR_PRI, vnew); 713 mpic_irq_write(src, MPIC_INFO(IRQ_VECTOR_PRI), vnew);
619 714
620 return 0; 715 return 0;
621} 716}
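mpic_set_irq_type() above is the usual read-modify-write: fetch the current vector/priority word, clear only the (now variant-specific) sense and polarity fields, OR in the bits chosen by mpic_type_to_vecpri(), and write back only if something actually changed. A tiny self-contained sketch of that update, with invented mask values:

    #include <stdio.h>

    #define SENSE_MASK    0x00400000u   /* illustrative bit positions only */
    #define POLARITY_MASK 0x00800000u

    static unsigned int vecpri_reg = 0x008000a5;   /* pretend current register value */

    static void set_type(unsigned int vecpri)
    {
        unsigned int vold = vecpri_reg;
        unsigned int vnew = vold & ~(SENSE_MASK | POLARITY_MASK);

        vnew |= vecpri;              /* vecpri carries only sense/polarity bits */
        if (vnew != vold)            /* skip the MMIO write when nothing changed */
            vecpri_reg = vnew;
    }

    int main(void)
    {
        set_type(SENSE_MASK);             /* request a different sense/polarity pair */
        printf("0x%08x\n", vecpri_reg);   /* vector/priority bits 0xa5 are untouched */
        return 0;
    }
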
@@ -798,17 +893,22 @@ struct mpic * __init mpic_alloc(struct device_node *node,
798 mpic->irq_count = irq_count; 893 mpic->irq_count = irq_count;
799 mpic->num_sources = 0; /* so far */ 894 mpic->num_sources = 0; /* so far */
800 895
896#ifdef CONFIG_MPIC_WEIRD
897 mpic->hw_set = mpic_infos[MPIC_GET_REGSET(flags)];
898#endif
899
801 /* Map the global registers */ 900 /* Map the global registers */
802 mpic->gregs = ioremap(phys_addr + MPIC_GREG_BASE, 0x1000); 901 mpic->gregs = ioremap(phys_addr + MPIC_INFO(GREG_BASE), 0x1000);
803 mpic->tmregs = mpic->gregs + ((MPIC_TIMER_BASE - MPIC_GREG_BASE) >> 2); 902 mpic->tmregs = mpic->gregs +
903 ((MPIC_INFO(TIMER_BASE) - MPIC_INFO(GREG_BASE)) >> 2);
804 BUG_ON(mpic->gregs == NULL); 904 BUG_ON(mpic->gregs == NULL);
805 905
806 /* Reset */ 906 /* Reset */
807 if (flags & MPIC_WANTS_RESET) { 907 if (flags & MPIC_WANTS_RESET) {
808 mpic_write(mpic->gregs, MPIC_GREG_GLOBAL_CONF_0, 908 mpic_write(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0),
809 mpic_read(mpic->gregs, MPIC_GREG_GLOBAL_CONF_0) 909 mpic_read(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0))
810 | MPIC_GREG_GCONF_RESET); 910 | MPIC_GREG_GCONF_RESET);
811 while( mpic_read(mpic->gregs, MPIC_GREG_GLOBAL_CONF_0) 911 while( mpic_read(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0))
812 & MPIC_GREG_GCONF_RESET) 912 & MPIC_GREG_GCONF_RESET)
813 mb(); 913 mb();
814 } 914 }
@@ -817,7 +917,7 @@ struct mpic * __init mpic_alloc(struct device_node *node,
817 * MPICs, num sources as well. On ISU MPICs, sources are counted 917 * MPICs, num sources as well. On ISU MPICs, sources are counted
818 * as ISUs are added 918 * as ISUs are added
819 */ 919 */
820 reg = mpic_read(mpic->gregs, MPIC_GREG_FEATURE_0); 920 reg = mpic_read(mpic->gregs, MPIC_INFO(GREG_FEATURE_0));
821 mpic->num_cpus = ((reg & MPIC_GREG_FEATURE_LAST_CPU_MASK) 921 mpic->num_cpus = ((reg & MPIC_GREG_FEATURE_LAST_CPU_MASK)
822 >> MPIC_GREG_FEATURE_LAST_CPU_SHIFT) + 1; 922 >> MPIC_GREG_FEATURE_LAST_CPU_SHIFT) + 1;
823 if (isu_size == 0) 923 if (isu_size == 0)
@@ -826,16 +926,16 @@ struct mpic * __init mpic_alloc(struct device_node *node,
826 926
827 /* Map the per-CPU registers */ 927 /* Map the per-CPU registers */
828 for (i = 0; i < mpic->num_cpus; i++) { 928 for (i = 0; i < mpic->num_cpus; i++) {
829 mpic->cpuregs[i] = ioremap(phys_addr + MPIC_CPU_BASE + 929 mpic->cpuregs[i] = ioremap(phys_addr + MPIC_INFO(CPU_BASE) +
830 i * MPIC_CPU_STRIDE, 0x1000); 930 i * MPIC_INFO(CPU_STRIDE), 0x1000);
831 BUG_ON(mpic->cpuregs[i] == NULL); 931 BUG_ON(mpic->cpuregs[i] == NULL);
832 } 932 }
833 933
834 /* Initialize main ISU if none provided */ 934 /* Initialize main ISU if none provided */
835 if (mpic->isu_size == 0) { 935 if (mpic->isu_size == 0) {
836 mpic->isu_size = mpic->num_sources; 936 mpic->isu_size = mpic->num_sources;
837 mpic->isus[0] = ioremap(phys_addr + MPIC_IRQ_BASE, 937 mpic->isus[0] = ioremap(phys_addr + MPIC_INFO(IRQ_BASE),
838 MPIC_IRQ_STRIDE * mpic->isu_size); 938 MPIC_INFO(IRQ_STRIDE) * mpic->isu_size);
839 BUG_ON(mpic->isus[0] == NULL); 939 BUG_ON(mpic->isus[0] == NULL);
840 } 940 }
841 mpic->isu_shift = 1 + __ilog2(mpic->isu_size - 1); 941 mpic->isu_shift = 1 + __ilog2(mpic->isu_size - 1);
@@ -879,7 +979,8 @@ void __init mpic_assign_isu(struct mpic *mpic, unsigned int isu_num,
879 979
880 BUG_ON(isu_num >= MPIC_MAX_ISU); 980 BUG_ON(isu_num >= MPIC_MAX_ISU);
881 981
882 mpic->isus[isu_num] = ioremap(phys_addr, MPIC_IRQ_STRIDE * mpic->isu_size); 982 mpic->isus[isu_num] = ioremap(phys_addr,
983 MPIC_INFO(IRQ_STRIDE) * mpic->isu_size);
883 if ((isu_first + mpic->isu_size) > mpic->num_sources) 984 if ((isu_first + mpic->isu_size) > mpic->num_sources)
884 mpic->num_sources = isu_first + mpic->isu_size; 985 mpic->num_sources = isu_first + mpic->isu_size;
885} 986}
@@ -904,14 +1005,16 @@ void __init mpic_init(struct mpic *mpic)
904 printk(KERN_INFO "mpic: Initializing for %d sources\n", mpic->num_sources); 1005 printk(KERN_INFO "mpic: Initializing for %d sources\n", mpic->num_sources);
905 1006
906 /* Set current processor priority to max */ 1007 /* Set current processor priority to max */
907 mpic_cpu_write(MPIC_CPU_CURRENT_TASK_PRI, 0xf); 1008 mpic_cpu_write(MPIC_INFO(CPU_CURRENT_TASK_PRI), 0xf);
908 1009
909 /* Initialize timers: just disable them all */ 1010 /* Initialize timers: just disable them all */
910 for (i = 0; i < 4; i++) { 1011 for (i = 0; i < 4; i++) {
911 mpic_write(mpic->tmregs, 1012 mpic_write(mpic->tmregs,
912 i * MPIC_TIMER_STRIDE + MPIC_TIMER_DESTINATION, 0); 1013 i * MPIC_INFO(TIMER_STRIDE) +
1014 MPIC_INFO(TIMER_DESTINATION), 0);
913 mpic_write(mpic->tmregs, 1015 mpic_write(mpic->tmregs,
914 i * MPIC_TIMER_STRIDE + MPIC_TIMER_VECTOR_PRI, 1016 i * MPIC_INFO(TIMER_STRIDE) +
1017 MPIC_INFO(TIMER_VECTOR_PRI),
915 MPIC_VECPRI_MASK | 1018 MPIC_VECPRI_MASK |
916 (MPIC_VEC_TIMER_0 + i)); 1019 (MPIC_VEC_TIMER_0 + i));
917 } 1020 }
@@ -940,21 +1043,22 @@ void __init mpic_init(struct mpic *mpic)
940 (8 << MPIC_VECPRI_PRIORITY_SHIFT); 1043 (8 << MPIC_VECPRI_PRIORITY_SHIFT);
941 1044
942 /* init hw */ 1045 /* init hw */
943 mpic_irq_write(i, MPIC_IRQ_VECTOR_PRI, vecpri); 1046 mpic_irq_write(i, MPIC_INFO(IRQ_VECTOR_PRI), vecpri);
944 mpic_irq_write(i, MPIC_IRQ_DESTINATION, 1047 mpic_irq_write(i, MPIC_INFO(IRQ_DESTINATION),
945 1 << hard_smp_processor_id()); 1048 1 << hard_smp_processor_id());
946 } 1049 }
947 1050
948 /* Init spurrious vector */ 1051 /* Init spurrious vector */
949 mpic_write(mpic->gregs, MPIC_GREG_SPURIOUS, MPIC_VEC_SPURRIOUS); 1052 mpic_write(mpic->gregs, MPIC_INFO(GREG_SPURIOUS), MPIC_VEC_SPURRIOUS);
950 1053
951 /* Disable 8259 passthrough */ 1054 /* Disable 8259 passthrough, if supported */
952 mpic_write(mpic->gregs, MPIC_GREG_GLOBAL_CONF_0, 1055 if (!(mpic->flags & MPIC_NO_PTHROU_DIS))
953 mpic_read(mpic->gregs, MPIC_GREG_GLOBAL_CONF_0) 1056 mpic_write(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0),
954 | MPIC_GREG_GCONF_8259_PTHROU_DIS); 1057 mpic_read(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0))
1058 | MPIC_GREG_GCONF_8259_PTHROU_DIS);
955 1059
956 /* Set current processor priority to 0 */ 1060 /* Set current processor priority to 0 */
957 mpic_cpu_write(MPIC_CPU_CURRENT_TASK_PRI, 0); 1061 mpic_cpu_write(MPIC_INFO(CPU_CURRENT_TASK_PRI), 0);
958} 1062}
959 1063
960void __init mpic_set_clk_ratio(struct mpic *mpic, u32 clock_ratio) 1064void __init mpic_set_clk_ratio(struct mpic *mpic, u32 clock_ratio)
@@ -997,9 +1101,9 @@ void mpic_irq_set_priority(unsigned int irq, unsigned int pri)
997 mpic_ipi_write(src - MPIC_VEC_IPI_0, 1101 mpic_ipi_write(src - MPIC_VEC_IPI_0,
998 reg | (pri << MPIC_VECPRI_PRIORITY_SHIFT)); 1102 reg | (pri << MPIC_VECPRI_PRIORITY_SHIFT));
999 } else { 1103 } else {
1000 reg = mpic_irq_read(src, MPIC_IRQ_VECTOR_PRI) 1104 reg = mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI))
1001 & ~MPIC_VECPRI_PRIORITY_MASK; 1105 & ~MPIC_VECPRI_PRIORITY_MASK;
1002 mpic_irq_write(src, MPIC_IRQ_VECTOR_PRI, 1106 mpic_irq_write(src, MPIC_INFO(IRQ_VECTOR_PRI),
1003 reg | (pri << MPIC_VECPRI_PRIORITY_SHIFT)); 1107 reg | (pri << MPIC_VECPRI_PRIORITY_SHIFT));
1004 } 1108 }
1005 spin_unlock_irqrestore(&mpic_lock, flags); 1109 spin_unlock_irqrestore(&mpic_lock, flags);
@@ -1017,7 +1121,7 @@ unsigned int mpic_irq_get_priority(unsigned int irq)
1017 if (is_ipi) 1121 if (is_ipi)
1018 reg = mpic_ipi_read(src = MPIC_VEC_IPI_0); 1122 reg = mpic_ipi_read(src = MPIC_VEC_IPI_0);
1019 else 1123 else
1020 reg = mpic_irq_read(src, MPIC_IRQ_VECTOR_PRI); 1124 reg = mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI));
1021 spin_unlock_irqrestore(&mpic_lock, flags); 1125 spin_unlock_irqrestore(&mpic_lock, flags);
1022 return (reg & MPIC_VECPRI_PRIORITY_MASK) >> MPIC_VECPRI_PRIORITY_SHIFT; 1126 return (reg & MPIC_VECPRI_PRIORITY_MASK) >> MPIC_VECPRI_PRIORITY_SHIFT;
1023} 1127}
@@ -1043,12 +1147,12 @@ void mpic_setup_this_cpu(void)
1043 */ 1147 */
1044 if (distribute_irqs) { 1148 if (distribute_irqs) {
1045 for (i = 0; i < mpic->num_sources ; i++) 1149 for (i = 0; i < mpic->num_sources ; i++)
1046 mpic_irq_write(i, MPIC_IRQ_DESTINATION, 1150 mpic_irq_write(i, MPIC_INFO(IRQ_DESTINATION),
1047 mpic_irq_read(i, MPIC_IRQ_DESTINATION) | msk); 1151 mpic_irq_read(i, MPIC_INFO(IRQ_DESTINATION)) | msk);
1048 } 1152 }
1049 1153
1050 /* Set current processor priority to 0 */ 1154 /* Set current processor priority to 0 */
1051 mpic_cpu_write(MPIC_CPU_CURRENT_TASK_PRI, 0); 1155 mpic_cpu_write(MPIC_INFO(CPU_CURRENT_TASK_PRI), 0);
1052 1156
1053 spin_unlock_irqrestore(&mpic_lock, flags); 1157 spin_unlock_irqrestore(&mpic_lock, flags);
1054#endif /* CONFIG_SMP */ 1158#endif /* CONFIG_SMP */
@@ -1058,7 +1162,7 @@ int mpic_cpu_get_priority(void)
1058{ 1162{
1059 struct mpic *mpic = mpic_primary; 1163 struct mpic *mpic = mpic_primary;
1060 1164
1061 return mpic_cpu_read(MPIC_CPU_CURRENT_TASK_PRI); 1165 return mpic_cpu_read(MPIC_INFO(CPU_CURRENT_TASK_PRI));
1062} 1166}
1063 1167
1064void mpic_cpu_set_priority(int prio) 1168void mpic_cpu_set_priority(int prio)
@@ -1066,7 +1170,7 @@ void mpic_cpu_set_priority(int prio)
1066 struct mpic *mpic = mpic_primary; 1170 struct mpic *mpic = mpic_primary;
1067 1171
1068 prio &= MPIC_CPU_TASKPRI_MASK; 1172 prio &= MPIC_CPU_TASKPRI_MASK;
1069 mpic_cpu_write(MPIC_CPU_CURRENT_TASK_PRI, prio); 1173 mpic_cpu_write(MPIC_INFO(CPU_CURRENT_TASK_PRI), prio);
1070} 1174}
1071 1175
1072/* 1176/*
@@ -1088,11 +1192,11 @@ void mpic_teardown_this_cpu(int secondary)
1088 1192
1089 /* let the mpic know we don't want intrs. */ 1193 /* let the mpic know we don't want intrs. */
1090 for (i = 0; i < mpic->num_sources ; i++) 1194 for (i = 0; i < mpic->num_sources ; i++)
1091 mpic_irq_write(i, MPIC_IRQ_DESTINATION, 1195 mpic_irq_write(i, MPIC_INFO(IRQ_DESTINATION),
1092 mpic_irq_read(i, MPIC_IRQ_DESTINATION) & ~msk); 1196 mpic_irq_read(i, MPIC_INFO(IRQ_DESTINATION)) & ~msk);
1093 1197
1094 /* Set current processor priority to max */ 1198 /* Set current processor priority to max */
1095 mpic_cpu_write(MPIC_CPU_CURRENT_TASK_PRI, 0xf); 1199 mpic_cpu_write(MPIC_INFO(CPU_CURRENT_TASK_PRI), 0xf);
1096 1200
1097 spin_unlock_irqrestore(&mpic_lock, flags); 1201 spin_unlock_irqrestore(&mpic_lock, flags);
1098} 1202}
@@ -1108,7 +1212,8 @@ void mpic_send_ipi(unsigned int ipi_no, unsigned int cpu_mask)
1108 DBG("%s: send_ipi(ipi_no: %d)\n", mpic->name, ipi_no); 1212 DBG("%s: send_ipi(ipi_no: %d)\n", mpic->name, ipi_no);
1109#endif 1213#endif
1110 1214
1111 mpic_cpu_write(MPIC_CPU_IPI_DISPATCH_0 + ipi_no * 0x10, 1215 mpic_cpu_write(MPIC_INFO(CPU_IPI_DISPATCH_0) +
1216 ipi_no * MPIC_INFO(CPU_IPI_DISPATCH_STRIDE),
1112 mpic_physmask(cpu_mask & cpus_addr(cpu_online_map)[0])); 1217 mpic_physmask(cpu_mask & cpus_addr(cpu_online_map)[0]));
1113} 1218}
1114 1219
@@ -1116,7 +1221,7 @@ unsigned int mpic_get_one_irq(struct mpic *mpic, struct pt_regs *regs)
1116{ 1221{
1117 u32 src; 1222 u32 src;
1118 1223
1119 src = mpic_cpu_read(MPIC_CPU_INTACK) & MPIC_VECPRI_VECTOR_MASK; 1224 src = mpic_cpu_read(MPIC_INFO(CPU_INTACK)) & MPIC_INFO(VECPRI_VECTOR_MASK);
1120#ifdef DEBUG_LOW 1225#ifdef DEBUG_LOW
1121 DBG("%s: get_one_irq(): %d\n", mpic->name, src); 1226 DBG("%s: get_one_irq(): %d\n", mpic->name, src);
1122#endif 1227#endif
diff --git a/arch/ppc/kernel/smp-tbsync.c b/arch/ppc/kernel/smp-tbsync.c
index 1576758deba..d0cf3f86931 100644
--- a/arch/ppc/kernel/smp-tbsync.c
+++ b/arch/ppc/kernel/smp-tbsync.c
@@ -47,8 +47,9 @@ void __devinit
47smp_generic_take_timebase( void ) 47smp_generic_take_timebase( void )
48{ 48{
49 int cmd, tbl, tbu; 49 int cmd, tbl, tbu;
50 unsigned long flags;
50 51
51 local_irq_disable(); 52 local_irq_save(flags);
52 while( !running ) 53 while( !running )
53 ; 54 ;
54 rmb(); 55 rmb();
@@ -64,7 +65,7 @@ smp_generic_take_timebase( void )
64 tbu = tbsync->tbu; 65 tbu = tbsync->tbu;
65 tbsync->ack = 0; 66 tbsync->ack = 0;
66 if( cmd == kExit ) 67 if( cmd == kExit )
67 return; 68 break;
68 69
69 if( cmd == kSetAndTest ) { 70 if( cmd == kSetAndTest ) {
70 while( tbsync->handshake ) 71 while( tbsync->handshake )
@@ -77,7 +78,7 @@ smp_generic_take_timebase( void )
77 } 78 }
78 enter_contest( tbsync->mark, -1 ); 79 enter_contest( tbsync->mark, -1 );
79 } 80 }
80 local_irq_enable(); 81 local_irq_restore(flags);
81} 82}
82 83
83static int __devinit 84static int __devinit
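The smp-tbsync.c change above replaces local_irq_disable()/local_irq_enable() with local_irq_save()/local_irq_restore() and turns the early return on kExit into a break, so every exit path reaches the restore and the caller gets its interrupt state back unchanged. A user-space analogue of the pattern, with stand-in save/restore helpers:

    #include <stdio.h>

    static int irqs_enabled = 1;                 /* stand-in for the CPU irq state */

    static void irq_save(int *flags)   { *flags = irqs_enabled; irqs_enabled = 0; }
    static void irq_restore(int flags) { irqs_enabled = flags; }

    static void take_timebase(const int *cmds, int n)
    {
        int flags, i;

        irq_save(&flags);
        for (i = 0; i < n; i++) {
            if (cmds[i] < 0)         /* "exit" command: break instead of return,  */
                break;               /* so the restore below is never skipped     */
            /* ... handle a set-and-test / get command here ... */
        }
        irq_restore(flags);
    }

    int main(void)
    {
        int cmds[] = { 1, 2, -1, 3 };

        take_timebase(cmds, 4);
        printf("interrupts enabled again: %d\n", irqs_enabled);   /* prints 1 */
        return 0;
    }
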
diff --git a/arch/ppc/platforms/85xx/mpc8560_ads.c b/arch/ppc/platforms/85xx/mpc8560_ads.c
index d90cd24d018..94badafe4ef 100644
--- a/arch/ppc/platforms/85xx/mpc8560_ads.c
+++ b/arch/ppc/platforms/85xx/mpc8560_ads.c
@@ -29,6 +29,7 @@
29#include <linux/initrd.h> 29#include <linux/initrd.h>
30#include <linux/module.h> 30#include <linux/module.h>
31#include <linux/fsl_devices.h> 31#include <linux/fsl_devices.h>
32#include <linux/fs_enet_pd.h>
32 33
33#include <asm/system.h> 34#include <asm/system.h>
34#include <asm/pgtable.h> 35#include <asm/pgtable.h>
@@ -58,6 +59,71 @@
58 * Setup the architecture 59 * Setup the architecture
59 * 60 *
60 */ 61 */
62static void init_fcc_ioports(void)
63{
64 struct immap *immap;
65 struct io_port *io;
66 u32 tempval;
67
68 immap = cpm2_immr;
69
70 io = &immap->im_ioport;
71 /* FCC2/3 are on the ports B/C. */
72 tempval = in_be32(&io->iop_pdirb);
73 tempval &= ~PB2_DIRB0;
74 tempval |= PB2_DIRB1;
75 out_be32(&io->iop_pdirb, tempval);
76
77 tempval = in_be32(&io->iop_psorb);
78 tempval &= ~PB2_PSORB0;
79 tempval |= PB2_PSORB1;
80 out_be32(&io->iop_psorb, tempval);
81
82 tempval = in_be32(&io->iop_pparb);
83 tempval |= (PB2_DIRB0 | PB2_DIRB1);
84 out_be32(&io->iop_pparb, tempval);
85
86 tempval = in_be32(&io->iop_pdirb);
87 tempval &= ~PB3_DIRB0;
88 tempval |= PB3_DIRB1;
89 out_be32(&io->iop_pdirb, tempval);
90
91 tempval = in_be32(&io->iop_psorb);
92 tempval &= ~PB3_PSORB0;
93 tempval |= PB3_PSORB1;
94 out_be32(&io->iop_psorb, tempval);
95
96 tempval = in_be32(&io->iop_pparb);
97 tempval |= (PB3_DIRB0 | PB3_DIRB1);
98 out_be32(&io->iop_pparb, tempval);
99
100 tempval = in_be32(&io->iop_pdirc);
101 tempval |= PC3_DIRC1;
102 out_be32(&io->iop_pdirc, tempval);
103
104 tempval = in_be32(&io->iop_pparc);
105 tempval |= PC3_DIRC1;
106 out_be32(&io->iop_pparc, tempval);
107
108 /* Port C has clocks...... */
109 tempval = in_be32(&io->iop_psorc);
110 tempval &= ~(CLK_TRX);
111 out_be32(&io->iop_psorc, tempval);
112
113 tempval = in_be32(&io->iop_pdirc);
114 tempval &= ~(CLK_TRX);
115 out_be32(&io->iop_pdirc, tempval);
116 tempval = in_be32(&io->iop_pparc);
117 tempval |= (CLK_TRX);
118 out_be32(&io->iop_pparc, tempval);
119
120 /* Configure Serial Interface clock routing.
121 * First, clear all FCC bits to zero,
122 * then set the ones we want.
123 */
124 immap->im_cpmux.cmx_fcr &= ~(CPMUX_CLK_MASK);
125 immap->im_cpmux.cmx_fcr |= CPMUX_CLK_ROUTE;
126}
61 127
62static void __init 128static void __init
63mpc8560ads_setup_arch(void) 129mpc8560ads_setup_arch(void)
@@ -66,6 +132,7 @@ mpc8560ads_setup_arch(void)
66 unsigned int freq; 132 unsigned int freq;
67 struct gianfar_platform_data *pdata; 133 struct gianfar_platform_data *pdata;
68 struct gianfar_mdio_data *mdata; 134 struct gianfar_mdio_data *mdata;
135 struct fs_platform_info *fpi;
69 136
70 cpm2_reset(); 137 cpm2_reset();
71 138
@@ -110,6 +177,28 @@ mpc8560ads_setup_arch(void)
110 memcpy(pdata->mac_addr, binfo->bi_enet1addr, 6); 177 memcpy(pdata->mac_addr, binfo->bi_enet1addr, 6);
111 } 178 }
112 179
180 init_fcc_ioports();
181 ppc_sys_device_remove(MPC85xx_CPM_FCC1);
182
183 fpi = (struct fs_platform_info *) ppc_sys_get_pdata(MPC85xx_CPM_FCC2);
184 if (fpi) {
185 memcpy(fpi->macaddr, binfo->bi_enet2addr, 6);
186 fpi->bus_id = "0:02";
187 fpi->phy_addr = 2;
188 fpi->dpram_offset = (u32)cpm2_immr->im_dprambase;
189 fpi->fcc_regs_c = (u32)&cpm2_immr->im_fcc_c[1];
190 }
191
192 fpi = (struct fs_platform_info *) ppc_sys_get_pdata(MPC85xx_CPM_FCC3);
193 if (fpi) {
194 memcpy(fpi->macaddr, binfo->bi_enet2addr, 6);
195 fpi->macaddr[5] += 1;
196 fpi->bus_id = "0:03";
197 fpi->phy_addr = 3;
198 fpi->dpram_offset = (u32)cpm2_immr->im_dprambase;
199 fpi->fcc_regs_c = (u32)&cpm2_immr->im_fcc_c[2];
200 }
201
113#ifdef CONFIG_BLK_DEV_INITRD 202#ifdef CONFIG_BLK_DEV_INITRD
114 if (initrd_start) 203 if (initrd_start)
115 ROOT_DEV = Root_RAM0; 204 ROOT_DEV = Root_RAM0;
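init_fcc_ioports() above programs the CPM2 port B/C pins for FCC2/FCC3 with the same read-modify-write shape over and over: read the direction / special-option / pin-assignment register, clear the bits belonging to one signal group, set the bits for the other, write the result back. A reduced self-contained sketch of that clear-then-set step (register and bit names invented for the example):

    #include <stdio.h>

    /* pretend memory-mapped CPM port registers */
    static unsigned int iop_pdirb, iop_pparb;

    #define DIRB0 0x00000040u            /* illustrative pin groups only */
    #define DIRB1 0x00000380u

    /* one read-modify-write: clear some bits, then set others */
    static void clrsetbits32(unsigned int *reg, unsigned int clr, unsigned int set)
    {
        unsigned int v = *reg;           /* in_be32() equivalent  */

        v &= ~clr;
        v |= set;
        *reg = v;                        /* out_be32() equivalent */
    }

    int main(void)
    {
        clrsetbits32(&iop_pdirb, DIRB0, DIRB1);       /* group 0 as inputs, group 1 as outputs */
        clrsetbits32(&iop_pparb, 0, DIRB0 | DIRB1);   /* hand both groups over to the FCC      */
        printf("pdirb=0x%08x pparb=0x%08x\n", iop_pdirb, iop_pparb);
        return 0;
    }
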
diff --git a/arch/ppc/platforms/85xx/mpc85xx_ads_common.h b/arch/ppc/platforms/85xx/mpc85xx_ads_common.h
index abf32281655..c8c322fe368 100644
--- a/arch/ppc/platforms/85xx/mpc85xx_ads_common.h
+++ b/arch/ppc/platforms/85xx/mpc85xx_ads_common.h
@@ -45,4 +45,23 @@ extern void mpc85xx_ads_map_io(void) __init;
45 45
46#define MPC85XX_PCI1_IO_SIZE 0x01000000 46#define MPC85XX_PCI1_IO_SIZE 0x01000000
47 47
48/* FCC1 Clock Source Configuration. These can be
49 * redefined in the board specific file.
50 * Can only choose from CLK9-12 */
51#define F1_RXCLK 12
52#define F1_TXCLK 11
53
54/* FCC2 Clock Source Configuration. These can be
55 * redefined in the board specific file.
56 * Can only choose from CLK13-16 */
57#define F2_RXCLK 13
58#define F2_TXCLK 14
59
60/* FCC3 Clock Source Configuration. These can be
61 * redefined in the board specific file.
62 * Can only choose from CLK13-16 */
63#define F3_RXCLK 15
64#define F3_TXCLK 16
65
66
48#endif /* __MACH_MPC85XX_ADS_H__ */ 67#endif /* __MACH_MPC85XX_ADS_H__ */
diff --git a/arch/ppc/platforms/mpc8272ads_setup.c b/arch/ppc/platforms/mpc8272ads_setup.c
index abb7154de2c..2a35fe2b9b9 100644
--- a/arch/ppc/platforms/mpc8272ads_setup.c
+++ b/arch/ppc/platforms/mpc8272ads_setup.c
@@ -56,64 +56,51 @@ static struct fs_uart_platform_info mpc8272_uart_pdata[] = {
56 }, 56 },
57}; 57};
58 58
59static struct fs_mii_bus_info mii_bus_info = { 59static struct fs_mii_bb_platform_info m82xx_mii_bb_pdata = {
60 .method = fsmii_bitbang, 60 .mdio_dat.bit = 18,
61 .id = 0, 61 .mdio_dir.bit = 18,
62 .i.bitbang = { 62 .mdc_dat.bit = 19,
63 .mdio_port = fsiop_portc, 63 .delay = 1,
64 .mdio_bit = 18,
65 .mdc_port = fsiop_portc,
66 .mdc_bit = 19,
67 .delay = 1,
68 },
69};
70
71static struct fs_platform_info mpc82xx_fcc1_pdata = {
72 .fs_no = fsid_fcc1,
73 .cp_page = CPM_CR_FCC1_PAGE,
74 .cp_block = CPM_CR_FCC1_SBLOCK,
75 .clk_trx = (PC_F1RXCLK | PC_F1TXCLK),
76 .clk_route = CMX1_CLK_ROUTE,
77 .clk_mask = CMX1_CLK_MASK,
78 .init_ioports = init_fcc1_ioports,
79
80 .phy_addr = 0,
81#ifdef PHY_INTERRUPT
82 .phy_irq = PHY_INTERRUPT,
83#else
84 .phy_irq = -1;
85#endif
86 .mem_offset = FCC1_MEM_OFFSET,
87 .bus_info = &mii_bus_info,
88 .rx_ring = 32,
89 .tx_ring = 32,
90 .rx_copybreak = 240,
91 .use_napi = 0,
92 .napi_weight = 17,
93}; 64};
94 65
95static struct fs_platform_info mpc82xx_fcc2_pdata = { 66static struct fs_platform_info mpc82xx_enet_pdata[] = {
96 .fs_no = fsid_fcc2, 67 [fsid_fcc1] = {
97 .cp_page = CPM_CR_FCC2_PAGE, 68 .fs_no = fsid_fcc1,
98 .cp_block = CPM_CR_FCC2_SBLOCK, 69 .cp_page = CPM_CR_FCC1_PAGE,
99 .clk_trx = (PC_F2RXCLK | PC_F2TXCLK), 70 .cp_block = CPM_CR_FCC1_SBLOCK,
100 .clk_route = CMX2_CLK_ROUTE, 71
101 .clk_mask = CMX2_CLK_MASK, 72 .clk_trx = (PC_F1RXCLK | PC_F1TXCLK),
102 .init_ioports = init_fcc2_ioports, 73 .clk_route = CMX1_CLK_ROUTE,
103 74 .clk_mask = CMX1_CLK_MASK,
104 .phy_addr = 3, 75 .init_ioports = init_fcc1_ioports,
105#ifdef PHY_INTERRUPT 76
106 .phy_irq = PHY_INTERRUPT, 77 .mem_offset = FCC1_MEM_OFFSET,
107#else 78
108 .phy_irq = -1; 79 .rx_ring = 32,
109#endif 80 .tx_ring = 32,
110 .mem_offset = FCC2_MEM_OFFSET, 81 .rx_copybreak = 240,
111 .bus_info = &mii_bus_info, 82 .use_napi = 0,
112 .rx_ring = 32, 83 .napi_weight = 17,
113 .tx_ring = 32, 84 .bus_id = "0:00",
114 .rx_copybreak = 240, 85 },
115 .use_napi = 0, 86 [fsid_fcc2] = {
116 .napi_weight = 17, 87 .fs_no = fsid_fcc2,
88 .cp_page = CPM_CR_FCC2_PAGE,
89 .cp_block = CPM_CR_FCC2_SBLOCK,
90 .clk_trx = (PC_F2RXCLK | PC_F2TXCLK),
91 .clk_route = CMX2_CLK_ROUTE,
92 .clk_mask = CMX2_CLK_MASK,
93 .init_ioports = init_fcc2_ioports,
94
95 .mem_offset = FCC2_MEM_OFFSET,
96
97 .rx_ring = 32,
98 .tx_ring = 32,
99 .rx_copybreak = 240,
100 .use_napi = 0,
101 .napi_weight = 17,
102 .bus_id = "0:03",
103 },
117}; 104};
118 105
119static void init_fcc1_ioports(void) 106static void init_fcc1_ioports(void)
@@ -209,20 +196,21 @@ static void __init mpc8272ads_fixup_enet_pdata(struct platform_device *pdev,
209 bd_t* bi = (void*)__res; 196 bd_t* bi = (void*)__res;
210 int fs_no = fsid_fcc1+pdev->id-1; 197 int fs_no = fsid_fcc1+pdev->id-1;
211 198
212 mpc82xx_fcc1_pdata.dpram_offset = mpc82xx_fcc2_pdata.dpram_offset = (u32)cpm2_immr->im_dprambase; 199 if(fs_no > ARRAY_SIZE(mpc82xx_enet_pdata)) {
213 mpc82xx_fcc1_pdata.fcc_regs_c = mpc82xx_fcc2_pdata.fcc_regs_c = (u32)cpm2_immr->im_fcc_c; 200 return;
214
215 switch(fs_no) {
216 case fsid_fcc1:
217 memcpy(&mpc82xx_fcc1_pdata.macaddr,bi->bi_enetaddr,6);
218 pdev->dev.platform_data = &mpc82xx_fcc1_pdata;
219 break;
220 case fsid_fcc2:
221 memcpy(&mpc82xx_fcc2_pdata.macaddr,bi->bi_enetaddr,6);
222 mpc82xx_fcc2_pdata.macaddr[5] ^= 1;
223 pdev->dev.platform_data = &mpc82xx_fcc2_pdata;
224 break;
225 } 201 }
202
203 mpc82xx_enet_pdata[fs_no].dpram_offset=
204 (u32)cpm2_immr->im_dprambase;
205 mpc82xx_enet_pdata[fs_no].fcc_regs_c =
206 (u32)cpm2_immr->im_fcc_c;
207 memcpy(&mpc82xx_enet_pdata[fs_no].macaddr,bi->bi_enetaddr,6);
208
209 /* prevent dup mac */
210 if(fs_no == fsid_fcc2)
211 mpc82xx_enet_pdata[fs_no].macaddr[5] ^= 1;
212
213 pdev->dev.platform_data = &mpc82xx_enet_pdata[fs_no];
226} 214}
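The rewritten fixup above drops the two hand-maintained FCC structures in favour of one array indexed by the controller id: check the id against the array size, fill in the dpram/register fields, copy the board MAC address, and flip the last byte for FCC2 so the two ports never share an address. A reduced self-contained sketch of that indexing scheme (hypothetical types and values):

    #include <stdio.h>
    #include <string.h>

    struct enet_pdata {
        unsigned char macaddr[6];
        const char   *bus_id;
    };

    /* one slot per controller, indexed by id -- mirrors mpc82xx_enet_pdata[] */
    static struct enet_pdata pdata[] = {
        { .bus_id = "0:00" },
        { .bus_id = "0:03" },
    };

    static const unsigned char board_mac[6] = { 0x00, 0x04, 0x9f, 0x00, 0x12, 0x34 };

    static struct enet_pdata *fixup_enet(unsigned int id)
    {
        if (id >= sizeof(pdata) / sizeof(pdata[0]))
            return NULL;                     /* unknown controller: attach nothing */

        memcpy(pdata[id].macaddr, board_mac, 6);
        if (id == 1)
            pdata[id].macaddr[5] ^= 1;       /* keep the two ports' MACs distinct */
        return &pdata[id];
    }

    int main(void)
    {
        const struct enet_pdata *p = fixup_enet(1);

        printf("%s -> ...:%02x\n", p->bus_id, p->macaddr[5]);   /* "0:03 -> ...:35" */
        return 0;
    }
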
227 215
228static void mpc8272ads_fixup_uart_pdata(struct platform_device *pdev, 216static void mpc8272ads_fixup_uart_pdata(struct platform_device *pdev,
@@ -274,6 +262,29 @@ static void init_scc4_uart_ioports(void)
274 iounmap(immap); 262 iounmap(immap);
275} 263}
276 264
265static void __init mpc8272ads_fixup_mdio_pdata(struct platform_device *pdev,
266 int idx)
267{
268 m82xx_mii_bb_pdata.irq[0] = PHY_INTERRUPT;
269 m82xx_mii_bb_pdata.irq[1] = -1;
270 m82xx_mii_bb_pdata.irq[2] = -1;
271 m82xx_mii_bb_pdata.irq[3] = PHY_INTERRUPT;
272 m82xx_mii_bb_pdata.irq[31] = -1;
273
274
275 m82xx_mii_bb_pdata.mdio_dat.offset =
276 (u32)&cpm2_immr->im_ioport.iop_pdatc;
277
278 m82xx_mii_bb_pdata.mdio_dir.offset =
279 (u32)&cpm2_immr->im_ioport.iop_pdirc;
280
281 m82xx_mii_bb_pdata.mdc_dat.offset =
282 (u32)&cpm2_immr->im_ioport.iop_pdatc;
283
284
285 pdev->dev.platform_data = &m82xx_mii_bb_pdata;
286}
287
277static int mpc8272ads_platform_notify(struct device *dev) 288static int mpc8272ads_platform_notify(struct device *dev)
278{ 289{
279 static const struct platform_notify_dev_map dev_map[] = { 290 static const struct platform_notify_dev_map dev_map[] = {
@@ -286,6 +297,10 @@ static int mpc8272ads_platform_notify(struct device *dev)
286 .rtn = mpc8272ads_fixup_uart_pdata, 297 .rtn = mpc8272ads_fixup_uart_pdata,
287 }, 298 },
288 { 299 {
300 .bus_id = "fsl-bb-mdio",
301 .rtn = mpc8272ads_fixup_mdio_pdata,
302 },
303 {
289 .bus_id = NULL 304 .bus_id = NULL
290 } 305 }
291 }; 306 };
@@ -319,6 +334,7 @@ int __init mpc8272ads_init(void)
319 ppc_sys_device_enable(MPC82xx_CPM_SCC4); 334 ppc_sys_device_enable(MPC82xx_CPM_SCC4);
320#endif 335#endif
321 336
337 ppc_sys_device_enable(MPC82xx_MDIO_BB);
322 338
323 return 0; 339 return 0;
324} 340}
diff --git a/arch/ppc/platforms/mpc866ads_setup.c b/arch/ppc/platforms/mpc866ads_setup.c
index f19b6167c77..e12cece4c9f 100644
--- a/arch/ppc/platforms/mpc866ads_setup.c
+++ b/arch/ppc/platforms/mpc866ads_setup.c
@@ -1,10 +1,10 @@
1/*arch/ppc/platforms/mpc885ads-setup.c 1/*arch/ppc/platforms/mpc866ads-setup.c
2 * 2 *
3 * Platform setup for the Freescale mpc885ads board 3 * Platform setup for the Freescale mpc866ads board
4 * 4 *
5 * Vitaly Bordug <vbordug@ru.mvista.com> 5 * Vitaly Bordug <vbordug@ru.mvista.com>
6 * 6 *
7 * Copyright 2005 MontaVista Software Inc. 7 * Copyright 2005-2006 MontaVista Software Inc.
8 * 8 *
9 * This file is licensed under the terms of the GNU General Public License 9 * This file is licensed under the terms of the GNU General Public License
10 * version 2. This program is licensed "as is" without any warranty of any 10 * version 2. This program is licensed "as is" without any warranty of any
@@ -42,49 +42,36 @@ static void setup_scc1_ioports(void);
42static void setup_smc1_ioports(void); 42static void setup_smc1_ioports(void);
43static void setup_smc2_ioports(void); 43static void setup_smc2_ioports(void);
44 44
45static struct fs_mii_bus_info fec_mii_bus_info = { 45static struct fs_mii_fec_platform_info mpc8xx_mdio_fec_pdata;
46 .method = fsmii_fec,
47 .id = 0,
48};
49
50static struct fs_mii_bus_info scc_mii_bus_info = {
51 .method = fsmii_fixed,
52 .id = 0,
53 .i.fixed.speed = 10,
54 .i.fixed.duplex = 0,
55};
56 46
57static struct fs_platform_info mpc8xx_fec_pdata[] = { 47static struct fs_mii_fec_platform_info mpc8xx_mdio_fec_pdata;
58 {
59 .rx_ring = 128,
60 .tx_ring = 16,
61 .rx_copybreak = 240,
62 48
63 .use_napi = 1, 49static struct fs_platform_info mpc8xx_enet_pdata[] = {
64 .napi_weight = 17, 50 [fsid_fec1] = {
51 .rx_ring = 128,
52 .tx_ring = 16,
53 .rx_copybreak = 240,
65 54
66 .phy_addr = 15, 55 .use_napi = 1,
67 .phy_irq = -1, 56 .napi_weight = 17,
68 57
69 .use_rmii = 0, 58 .init_ioports = setup_fec1_ioports,
70 59
71 .bus_info = &fec_mii_bus_info, 60 .bus_id = "0:0f",
72 } 61 .has_phy = 1,
73}; 62 },
63 [fsid_scc1] = {
64 .rx_ring = 64,
65 .tx_ring = 8,
66 .rx_copybreak = 240,
67 .use_napi = 1,
68 .napi_weight = 17,
74 69
75static struct fs_platform_info mpc8xx_scc_pdata = {
76 .rx_ring = 64,
77 .tx_ring = 8,
78 .rx_copybreak = 240,
79 70
80 .use_napi = 1, 71 .init_ioports = setup_scc1_ioports,
81 .napi_weight = 17,
82
83 .phy_addr = -1,
84 .phy_irq = -1,
85
86 .bus_info = &scc_mii_bus_info,
87 72
73 .bus_id = "fixed@100:1",
74 },
88}; 75};
89 76
90static struct fs_uart_platform_info mpc866_uart_pdata[] = { 77static struct fs_uart_platform_info mpc866_uart_pdata[] = {
@@ -207,63 +194,6 @@ static void setup_scc1_ioports(void)
207 194
208} 195}
209 196
210static void mpc866ads_fixup_enet_pdata(struct platform_device *pdev, int fs_no)
211{
212 struct fs_platform_info *fpi = pdev->dev.platform_data;
213
214 volatile cpm8xx_t *cp;
215 bd_t *bd = (bd_t *) __res;
216 char *e;
217 int i;
218
219 /* Get pointer to Communication Processor */
220 cp = cpmp;
221 switch (fs_no) {
222 case fsid_fec1:
223 fpi = &mpc8xx_fec_pdata[0];
224 fpi->init_ioports = &setup_fec1_ioports;
225
226 break;
227 case fsid_scc1:
228 fpi = &mpc8xx_scc_pdata;
229 fpi->init_ioports = &setup_scc1_ioports;
230
231 break;
232 default:
233 printk(KERN_WARNING"Device %s is not supported!\n", pdev->name);
234 return;
235 }
236
237 pdev->dev.platform_data = fpi;
238 fpi->fs_no = fs_no;
239
240 e = (unsigned char *)&bd->bi_enetaddr;
241 for (i = 0; i < 6; i++)
242 fpi->macaddr[i] = *e++;
243
244 fpi->macaddr[5 - pdev->id]++;
245
246}
247
248static void mpc866ads_fixup_fec_enet_pdata(struct platform_device *pdev,
249 int idx)
250{
251 /* This is for FEC devices only */
252 if (!pdev || !pdev->name || (!strstr(pdev->name, "fsl-cpm-fec")))
253 return;
254 mpc866ads_fixup_enet_pdata(pdev, fsid_fec1 + pdev->id - 1);
255}
256
257static void mpc866ads_fixup_scc_enet_pdata(struct platform_device *pdev,
258 int idx)
259{
260 /* This is for SCC devices only */
261 if (!pdev || !pdev->name || (!strstr(pdev->name, "fsl-cpm-scc")))
262 return;
263
264 mpc866ads_fixup_enet_pdata(pdev, fsid_scc1 + pdev->id - 1);
265}
266
267static void setup_smc1_ioports(void) 197static void setup_smc1_ioports(void)
268{ 198{
269 immap_t *immap = (immap_t *) IMAP_ADDR; 199 immap_t *immap = (immap_t *) IMAP_ADDR;
@@ -315,6 +245,56 @@ static void setup_smc2_ioports(void)
315 245
316} 246}
317 247
248static int ma_count = 0;
249
250static void mpc866ads_fixup_enet_pdata(struct platform_device *pdev, int fs_no)
251{
252 struct fs_platform_info *fpi;
253
254 volatile cpm8xx_t *cp;
255 bd_t *bd = (bd_t *) __res;
256 char *e;
257 int i;
258
259 /* Get pointer to Communication Processor */
260 cp = cpmp;
261
262 if(fs_no > ARRAY_SIZE(mpc8xx_enet_pdata)) {
263 printk(KERN_ERR"No network-suitable #%d device on bus", fs_no);
264 return;
265 }
266
267
268 fpi = &mpc8xx_enet_pdata[fs_no];
269 fpi->fs_no = fs_no;
270 pdev->dev.platform_data = fpi;
271
272 e = (unsigned char *)&bd->bi_enetaddr;
273 for (i = 0; i < 6; i++)
274 fpi->macaddr[i] = *e++;
275
276 fpi->macaddr[5] += ma_count++;
277}
278
279static void mpc866ads_fixup_fec_enet_pdata(struct platform_device *pdev,
280 int idx)
281{
282 /* This is for FEC devices only */
283 if (!pdev || !pdev->name || (!strstr(pdev->name, "fsl-cpm-fec")))
284 return;
285 mpc866ads_fixup_enet_pdata(pdev, fsid_fec1 + pdev->id - 1);
286}
287
288static void mpc866ads_fixup_scc_enet_pdata(struct platform_device *pdev,
289 int idx)
290{
291 /* This is for SCC devices only */
292 if (!pdev || !pdev->name || (!strstr(pdev->name, "fsl-cpm-scc")))
293 return;
294
295 mpc866ads_fixup_enet_pdata(pdev, fsid_scc1 + pdev->id - 1);
296}
297
318static void __init mpc866ads_fixup_uart_pdata(struct platform_device *pdev, 298static void __init mpc866ads_fixup_uart_pdata(struct platform_device *pdev,
319 int idx) 299 int idx)
320{ 300{
@@ -359,6 +339,9 @@ static int mpc866ads_platform_notify(struct device *dev)
359 339
360int __init mpc866ads_init(void) 340int __init mpc866ads_init(void)
361{ 341{
342 bd_t *bd = (bd_t *) __res;
343 struct fs_mii_fec_platform_info* fmpi;
344
362 printk(KERN_NOTICE "mpc866ads: Init\n"); 345 printk(KERN_NOTICE "mpc866ads: Init\n");
363 346
364 platform_notify = mpc866ads_platform_notify; 347 platform_notify = mpc866ads_platform_notify;
@@ -366,11 +349,20 @@ int __init mpc866ads_init(void)
366 ppc_sys_device_initfunc(); 349 ppc_sys_device_initfunc();
367 ppc_sys_device_disable_all(); 350 ppc_sys_device_disable_all();
368 351
369#ifdef MPC8xx_SECOND_ETH_SCC1 352#ifdef CONFIG_MPC8xx_SECOND_ETH_SCC1
370 ppc_sys_device_enable(MPC8xx_CPM_SCC1); 353 ppc_sys_device_enable(MPC8xx_CPM_SCC1);
371#endif 354#endif
372 ppc_sys_device_enable(MPC8xx_CPM_FEC1); 355 ppc_sys_device_enable(MPC8xx_CPM_FEC1);
373 356
357 ppc_sys_device_enable(MPC8xx_MDIO_FEC);
358
359 fmpi = ppc_sys_platform_devices[MPC8xx_MDIO_FEC].dev.platform_data =
360 &mpc8xx_mdio_fec_pdata;
361
362 fmpi->mii_speed = ((((bd->bi_intfreq + 4999999) / 2500000) / 2) & 0x3F) << 1;
363 /* No PHY interrupt line here */
364 fmpi->irq[0xf] = -1;
365
374/* Since either of the uarts could be used as console, they need to ready */ 366/* Since either of the uarts could be used as console, they need to ready */
375#ifdef CONFIG_SERIAL_CPM_SMC1 367#ifdef CONFIG_SERIAL_CPM_SMC1
376 ppc_sys_device_enable(MPC8xx_CPM_SMC1); 368 ppc_sys_device_enable(MPC8xx_CPM_SMC1);
@@ -381,6 +373,14 @@ int __init mpc866ads_init(void)
381 ppc_sys_device_enable(MPC8xx_CPM_SMC2); 373 ppc_sys_device_enable(MPC8xx_CPM_SMC2);
382 ppc_sys_device_setfunc(MPC8xx_CPM_SMC2, PPC_SYS_FUNC_UART); 374 ppc_sys_device_setfunc(MPC8xx_CPM_SMC2, PPC_SYS_FUNC_UART);
383#endif 375#endif
376 ppc_sys_device_enable(MPC8xx_MDIO_FEC);
377
378 fmpi = ppc_sys_platform_devices[MPC8xx_MDIO_FEC].dev.platform_data =
379 &mpc8xx_mdio_fec_pdata;
380
381 fmpi->mii_speed = ((((bd->bi_intfreq + 4999999) / 2500000) / 2) & 0x3F) << 1;
382 /* No PHY interrupt line here */
383 fmpi->irq[0xf] = -1;
384 384
385 return 0; 385 return 0;
386} 386}
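For reference, the mii_speed expression used above is the same one the removed mpc885ads_scc_phy_init() (further down in this patch) documents as "Set MII speed to 2.5 MHz". Worked through with an example bi_intfreq of 50 MHz: (50,000,000 + 4,999,999) / 2,500,000 = 21, then 21 / 2 = 10, 10 & 0x3F = 10, and 10 << 1 = 20 is the value programmed; the +4,999,999 biases the division upward, presumably so the derived management clock errs on the slow (safe) side.
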
diff --git a/arch/ppc/platforms/mpc885ads_setup.c b/arch/ppc/platforms/mpc885ads_setup.c
index c1fc4a16fea..5dfa4e6c2af 100644
--- a/arch/ppc/platforms/mpc885ads_setup.c
+++ b/arch/ppc/platforms/mpc885ads_setup.c
@@ -38,7 +38,10 @@ extern unsigned char __res[];
38static void setup_smc1_ioports(void); 38static void setup_smc1_ioports(void);
39static void setup_smc2_ioports(void); 39static void setup_smc2_ioports(void);
40 40
41static void __init mpc885ads_scc_phy_init(char); 41static struct fs_mii_fec_platform_info mpc8xx_mdio_fec_pdata;
42static void setup_fec1_ioports(void);
43static void setup_fec2_ioports(void);
44static void setup_scc3_ioports(void);
42 45
43static struct fs_uart_platform_info mpc885_uart_pdata[] = { 46static struct fs_uart_platform_info mpc885_uart_pdata[] = {
44 [fsid_smc1_uart] = { 47 [fsid_smc1_uart] = {
@@ -61,23 +64,8 @@ static struct fs_uart_platform_info mpc885_uart_pdata[] = {
61 }, 64 },
62}; 65};
63 66
64static struct fs_mii_bus_info fec_mii_bus_info = { 67static struct fs_platform_info mpc8xx_enet_pdata[] = {
65 .method = fsmii_fec, 68 [fsid_fec1] = {
66 .id = 0,
67};
68
69static struct fs_mii_bus_info scc_mii_bus_info = {
70#ifdef CONFIG_SCC_ENET_8xx_FIXED
71 .method = fsmii_fixed,
72#else
73 .method = fsmii_fec,
74#endif
75
76 .id = 0,
77};
78
79static struct fs_platform_info mpc8xx_fec_pdata[] = {
80 {
81 .rx_ring = 128, 69 .rx_ring = 128,
82 .tx_ring = 16, 70 .tx_ring = 16,
83 .rx_copybreak = 240, 71 .rx_copybreak = 240,
@@ -85,11 +73,12 @@ static struct fs_platform_info mpc8xx_fec_pdata[] = {
85 .use_napi = 1, 73 .use_napi = 1,
86 .napi_weight = 17, 74 .napi_weight = 17,
87 75
88 .phy_addr = 0, 76 .init_ioports = setup_fec1_ioports,
89 .phy_irq = SIU_IRQ7,
90 77
91 .bus_info = &fec_mii_bus_info, 78 .bus_id = "0:00",
92 }, { 79 .has_phy = 1,
80 },
81 [fsid_fec2] = {
93 .rx_ring = 128, 82 .rx_ring = 128,
94 .tx_ring = 16, 83 .tx_ring = 16,
95 .rx_copybreak = 240, 84 .rx_copybreak = 240,
@@ -97,35 +86,32 @@ static struct fs_platform_info mpc8xx_fec_pdata[] = {
97 .use_napi = 1, 86 .use_napi = 1,
98 .napi_weight = 17, 87 .napi_weight = 17,
99 88
100 .phy_addr = 1, 89 .init_ioports = setup_fec2_ioports,
101 .phy_irq = SIU_IRQ7,
102
103 .bus_info = &fec_mii_bus_info,
104 }
105};
106 90
107static struct fs_platform_info mpc8xx_scc_pdata = { 91 .bus_id = "0:01",
108 .rx_ring = 64, 92 .has_phy = 1,
109 .tx_ring = 8, 93 },
110 .rx_copybreak = 240, 94 [fsid_scc3] = {
95 .rx_ring = 64,
96 .tx_ring = 8,
97 .rx_copybreak = 240,
111 98
112 .use_napi = 1, 99 .use_napi = 1,
113 .napi_weight = 17, 100 .napi_weight = 17,
114 101
115 .phy_addr = 2, 102 .init_ioports = setup_scc3_ioports,
116#ifdef CONFIG_MPC8xx_SCC_ENET_FIXED 103#ifdef CONFIG_FIXED_MII_10_FDX
117 .phy_irq = -1, 104 .bus_id = "fixed@100:1",
118#else 105#else
119 .phy_irq = SIU_IRQ7, 106 .bus_id = "0:02",
120#endif 107 #endif
121 108 },
122 .bus_info = &scc_mii_bus_info,
123}; 109};
124 110
125void __init board_init(void) 111void __init board_init(void)
126{ 112{
127 volatile cpm8xx_t *cp = cpmp; 113 cpm8xx_t *cp = cpmp;
128 unsigned int *bcsr_io; 114 unsigned int *bcsr_io;
129 115
130#ifdef CONFIG_FS_ENET 116#ifdef CONFIG_FS_ENET
131 immap_t *immap = (immap_t *) IMAP_ADDR; 117 immap_t *immap = (immap_t *) IMAP_ADDR;
@@ -164,6 +150,14 @@ void __init board_init(void)
164 /* use MDC for MII (common) */ 150 /* use MDC for MII (common) */
165 setbits16(&immap->im_ioport.iop_pdpar, 0x0080); 151 setbits16(&immap->im_ioport.iop_pdpar, 0x0080);
166 clrbits16(&immap->im_ioport.iop_pddir, 0x0080); 152 clrbits16(&immap->im_ioport.iop_pddir, 0x0080);
153 bcsr_io = ioremap(BCSR5, sizeof(unsigned long));
154 clrbits32(bcsr_io,BCSR5_MII1_EN);
155 clrbits32(bcsr_io,BCSR5_MII1_RST);
156#ifdef CONFIG_MPC8xx_SECOND_ETH_FEC2
157 clrbits32(bcsr_io,BCSR5_MII2_EN);
158 clrbits32(bcsr_io,BCSR5_MII2_RST);
159#endif
160 iounmap(bcsr_io);
167#endif 161#endif
168} 162}
169 163
@@ -194,8 +188,8 @@ static void setup_fec2_ioports(void)
194 /* configure FEC2 pins */ 188 /* configure FEC2 pins */
195 setbits32(&immap->im_cpm.cp_pepar, 0x0003fffc); 189 setbits32(&immap->im_cpm.cp_pepar, 0x0003fffc);
196 setbits32(&immap->im_cpm.cp_pedir, 0x0003fffc); 190 setbits32(&immap->im_cpm.cp_pedir, 0x0003fffc);
197 setbits32(&immap->im_cpm.cp_peso, 0x00037800);
198 clrbits32(&immap->im_cpm.cp_peso, 0x000087fc); 191 clrbits32(&immap->im_cpm.cp_peso, 0x000087fc);
192 setbits32(&immap->im_cpm.cp_peso, 0x00037800);
199 clrbits32(&immap->im_cpm.cp_cptr, 0x00000080); 193 clrbits32(&immap->im_cpm.cp_cptr, 0x00000080);
200} 194}
201 195
@@ -213,6 +207,8 @@ static void setup_scc3_ioports(void)
213 207
214 /* Enable the PHY. 208 /* Enable the PHY.
215 */ 209 */
210 clrbits32(bcsr_io+4, BCSR4_ETH10_RST);
211 udelay(1000);
216 setbits32(bcsr_io+4, BCSR4_ETH10_RST); 212 setbits32(bcsr_io+4, BCSR4_ETH10_RST);
217 /* Configure port A pins for Txd and Rxd. 213 /* Configure port A pins for Txd and Rxd.
218 */ 214 */
@@ -254,37 +250,38 @@ static void setup_scc3_ioports(void)
254 clrbits32(&immap->im_cpm.cp_pedir, PE_ENET_TENA); 250 clrbits32(&immap->im_cpm.cp_pedir, PE_ENET_TENA);
255 setbits32(&immap->im_cpm.cp_peso, PE_ENET_TENA); 251 setbits32(&immap->im_cpm.cp_peso, PE_ENET_TENA);
256 252
257 setbits32(bcsr_io+1, BCSR1_ETHEN); 253 setbits32(bcsr_io+4, BCSR1_ETHEN);
258 iounmap(bcsr_io); 254 iounmap(bcsr_io);
259} 255}
260 256
257static int mac_count = 0;
258
261static void mpc885ads_fixup_enet_pdata(struct platform_device *pdev, int fs_no) 259static void mpc885ads_fixup_enet_pdata(struct platform_device *pdev, int fs_no)
262{ 260{
263 struct fs_platform_info *fpi = pdev->dev.platform_data; 261 struct fs_platform_info *fpi;
264
265 volatile cpm8xx_t *cp;
266 bd_t *bd = (bd_t *) __res; 262 bd_t *bd = (bd_t *) __res;
267 char *e; 263 char *e;
268 int i; 264 int i;
269 265
270 /* Get pointer to Communication Processor */ 266 if(fs_no > ARRAY_SIZE(mpc8xx_enet_pdata)) {
271 cp = cpmp; 267 printk(KERN_ERR"No network-suitable #%d device on bus", fs_no);
268 return;
269 }
270
271 fpi = &mpc8xx_enet_pdata[fs_no];
272
272 switch (fs_no) { 273 switch (fs_no) {
273 case fsid_fec1: 274 case fsid_fec1:
274 fpi = &mpc8xx_fec_pdata[0];
275 fpi->init_ioports = &setup_fec1_ioports; 275 fpi->init_ioports = &setup_fec1_ioports;
276 break; 276 break;
277 case fsid_fec2: 277 case fsid_fec2:
278 fpi = &mpc8xx_fec_pdata[1];
279 fpi->init_ioports = &setup_fec2_ioports; 278 fpi->init_ioports = &setup_fec2_ioports;
280 break; 279 break;
281 case fsid_scc3: 280 case fsid_scc3:
282 fpi = &mpc8xx_scc_pdata;
283 fpi->init_ioports = &setup_scc3_ioports; 281 fpi->init_ioports = &setup_scc3_ioports;
284 mpc885ads_scc_phy_init(fpi->phy_addr);
285 break; 282 break;
286 default: 283 default:
287 printk(KERN_WARNING"Device %s is not supported!\n", pdev->name); 284 printk(KERN_WARNING "Device %s is not supported!\n", pdev->name);
288 return; 285 return;
289 } 286 }
290 287
@@ -295,7 +292,7 @@ static void mpc885ads_fixup_enet_pdata(struct platform_device *pdev, int fs_no)
295 for (i = 0; i < 6; i++) 292 for (i = 0; i < 6; i++)
296 fpi->macaddr[i] = *e++; 293 fpi->macaddr[i] = *e++;
297 294
298 fpi->macaddr[5 - pdev->id]++; 295 fpi->macaddr[5] += mac_count++;
299 296
300} 297}
301 298
@@ -318,58 +315,6 @@ static void __init mpc885ads_fixup_scc_enet_pdata(struct platform_device *pdev,
318 mpc885ads_fixup_enet_pdata(pdev, fsid_scc1 + pdev->id - 1); 315 mpc885ads_fixup_enet_pdata(pdev, fsid_scc1 + pdev->id - 1);
319} 316}
320 317
321/* SCC ethernet controller does not have MII management channel. FEC1 MII
322 * channel is used to communicate with the 10Mbit PHY.
323 */
324
325#define MII_ECNTRL_PINMUX 0x4
326#define FEC_ECNTRL_PINMUX 0x00000004
327#define FEC_RCNTRL_MII_MODE 0x00000004
328
329/* Make MII read/write commands.
330 */
331#define mk_mii_write(REG, VAL, PHY_ADDR) (0x50020000 | (((REG) & 0x1f) << 18) | \
332 ((VAL) & 0xffff) | ((PHY_ADDR) << 23))
333
334static void mpc885ads_scc_phy_init(char phy_addr)
335{
336 volatile immap_t *immap;
337 volatile fec_t *fecp;
338 bd_t *bd;
339
340 bd = (bd_t *) __res;
341 immap = (immap_t *) IMAP_ADDR; /* pointer to internal registers */
342 fecp = &(immap->im_cpm.cp_fec);
343
344 /* Enable MII pins of the FEC1
345 */
346 setbits16(&immap->im_ioport.iop_pdpar, 0x0080);
347 clrbits16(&immap->im_ioport.iop_pddir, 0x0080);
348 /* Set MII speed to 2.5 MHz
349 */
350 out_be32(&fecp->fec_mii_speed,
351 ((((bd->bi_intfreq + 4999999) / 2500000) / 2) & 0x3F) << 1);
352
353 /* Enable FEC pin MUX
354 */
355 setbits32(&fecp->fec_ecntrl, MII_ECNTRL_PINMUX);
356 setbits32(&fecp->fec_r_cntrl, FEC_RCNTRL_MII_MODE);
357
358 out_be32(&fecp->fec_mii_data,
359 mk_mii_write(MII_BMCR, BMCR_ISOLATE, phy_addr));
360 udelay(100);
361 out_be32(&fecp->fec_mii_data,
362 mk_mii_write(MII_ADVERTISE,
363 ADVERTISE_10HALF | ADVERTISE_CSMA, phy_addr));
364 udelay(100);
365
366 /* Disable FEC MII settings
367 */
368 clrbits32(&fecp->fec_ecntrl, MII_ECNTRL_PINMUX);
369 clrbits32(&fecp->fec_r_cntrl, FEC_RCNTRL_MII_MODE);
370 out_be32(&fecp->fec_mii_speed, 0);
371}
372
373static void setup_smc1_ioports(void) 318static void setup_smc1_ioports(void)
374{ 319{
375 immap_t *immap = (immap_t *) IMAP_ADDR; 320 immap_t *immap = (immap_t *) IMAP_ADDR;
@@ -462,6 +407,9 @@ static int mpc885ads_platform_notify(struct device *dev)
462 407
463int __init mpc885ads_init(void) 408int __init mpc885ads_init(void)
464{ 409{
410 struct fs_mii_fec_platform_info* fmpi;
411 bd_t *bd = (bd_t *) __res;
412
465 printk(KERN_NOTICE "mpc885ads: Init\n"); 413 printk(KERN_NOTICE "mpc885ads: Init\n");
466 414
467 platform_notify = mpc885ads_platform_notify; 415 platform_notify = mpc885ads_platform_notify;
@@ -471,8 +419,17 @@ int __init mpc885ads_init(void)
471 419
472 ppc_sys_device_enable(MPC8xx_CPM_FEC1); 420 ppc_sys_device_enable(MPC8xx_CPM_FEC1);
473 421
422 ppc_sys_device_enable(MPC8xx_MDIO_FEC);
423 fmpi = ppc_sys_platform_devices[MPC8xx_MDIO_FEC].dev.platform_data =
424 &mpc8xx_mdio_fec_pdata;
425
426 fmpi->mii_speed = ((((bd->bi_intfreq + 4999999) / 2500000) / 2) & 0x3F) << 1;
427
428 /* No PHY interrupt line here */
429 fmpi->irq[0xf] = SIU_IRQ7;
430
474#ifdef CONFIG_MPC8xx_SECOND_ETH_SCC3 431#ifdef CONFIG_MPC8xx_SECOND_ETH_SCC3
475 ppc_sys_device_enable(MPC8xx_CPM_SCC1); 432 ppc_sys_device_enable(MPC8xx_CPM_SCC3);
476 433
477#endif 434#endif
478#ifdef CONFIG_MPC8xx_SECOND_ETH_FEC2 435#ifdef CONFIG_MPC8xx_SECOND_ETH_FEC2
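[Editor's note] The mii_speed value programmed in mpc885ads_init() above follows the usual FEC convention of dividing the bus clock down to roughly 2.5 MHz: round the divider up, halve it, mask it to 6 bits and shift it into the speed field. A minimal sketch of that arithmetic, as a hypothetical stand-alone helper rather than anything taken from the patch:

        /* Sketch only: mirrors the mii_speed computation used in
         * mpc885ads_init() above.  bi_intfreq is the bus frequency in Hz. */
        static unsigned int fec_mii_speed(unsigned long bi_intfreq)
        {
                /* round up to a multiple of 2.5 MHz, halve, clamp to 6 bits,
                 * then shift into the MII_SPEED field position */
                return ((((bi_intfreq + 4999999) / 2500000) / 2) & 0x3F) << 1;
        }

        /* e.g. a 66 MHz bus clock yields a field value of 28 (0x1C) */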
diff --git a/arch/ppc/platforms/pq2ads_pd.h b/arch/ppc/platforms/pq2ads_pd.h
index 8f14a43eafe..672483df807 100644
--- a/arch/ppc/platforms/pq2ads_pd.h
+++ b/arch/ppc/platforms/pq2ads_pd.h
@@ -29,86 +29,4 @@
29#define F3_RXCLK 13 29#define F3_RXCLK 13
30#define F3_TXCLK 14 30#define F3_TXCLK 14
31 31
32/* Automatically generates register configurations */
33#define PC_CLK(x) ((uint)(1<<(x-1))) /* FCC CLK I/O ports */
34
35#define CMXFCR_RF1CS(x) ((uint)((x-5)<<27)) /* FCC1 Receive Clock Source */
36#define CMXFCR_TF1CS(x) ((uint)((x-5)<<24)) /* FCC1 Transmit Clock Source */
37#define CMXFCR_RF2CS(x) ((uint)((x-9)<<19)) /* FCC2 Receive Clock Source */
38#define CMXFCR_TF2CS(x) ((uint)((x-9)<<16)) /* FCC2 Transmit Clock Source */
39#define CMXFCR_RF3CS(x) ((uint)((x-9)<<11)) /* FCC3 Receive Clock Source */
40#define CMXFCR_TF3CS(x) ((uint)((x-9)<<8)) /* FCC3 Transmit Clock Source */
41
42#define PC_F1RXCLK PC_CLK(F1_RXCLK)
43#define PC_F1TXCLK PC_CLK(F1_TXCLK)
44#define CMX1_CLK_ROUTE (CMXFCR_RF1CS(F1_RXCLK) | CMXFCR_TF1CS(F1_TXCLK))
45#define CMX1_CLK_MASK ((uint)0xff000000)
46
47#define PC_F2RXCLK PC_CLK(F2_RXCLK)
48#define PC_F2TXCLK PC_CLK(F2_TXCLK)
49#define CMX2_CLK_ROUTE (CMXFCR_RF2CS(F2_RXCLK) | CMXFCR_TF2CS(F2_TXCLK))
50#define CMX2_CLK_MASK ((uint)0x00ff0000)
51
52#define PC_F3RXCLK PC_CLK(F3_RXCLK)
53#define PC_F3TXCLK PC_CLK(F3_TXCLK)
54#define CMX3_CLK_ROUTE (CMXFCR_RF3CS(F3_RXCLK) | CMXFCR_TF3CS(F3_TXCLK))
55#define CMX3_CLK_MASK ((uint)0x0000ff00)
56
57/* I/O Pin assignment for FCC1. I don't yet know the best way to do this,
58 * but there is little variation among the choices.
59 */
60#define PA1_COL 0x00000001U
61#define PA1_CRS 0x00000002U
62#define PA1_TXER 0x00000004U
63#define PA1_TXEN 0x00000008U
64#define PA1_RXDV 0x00000010U
65#define PA1_RXER 0x00000020U
66#define PA1_TXDAT 0x00003c00U
67#define PA1_RXDAT 0x0003c000U
68#define PA1_PSORA0 (PA1_RXDAT | PA1_TXDAT)
69#define PA1_PSORA1 (PA1_COL | PA1_CRS | PA1_TXER | PA1_TXEN | \
70 PA1_RXDV | PA1_RXER)
71#define PA1_DIRA0 (PA1_RXDAT | PA1_CRS | PA1_COL | PA1_RXER | PA1_RXDV)
72#define PA1_DIRA1 (PA1_TXDAT | PA1_TXEN | PA1_TXER)
73
74
75/* I/O Pin assignment for FCC2. I don't yet know the best way to do this,
76 * but there is little variation among the choices.
77 */
78#define PB2_TXER 0x00000001U
79#define PB2_RXDV 0x00000002U
80#define PB2_TXEN 0x00000004U
81#define PB2_RXER 0x00000008U
82#define PB2_COL 0x00000010U
83#define PB2_CRS 0x00000020U
84#define PB2_TXDAT 0x000003c0U
85#define PB2_RXDAT 0x00003c00U
86#define PB2_PSORB0 (PB2_RXDAT | PB2_TXDAT | PB2_CRS | PB2_COL | \
87 PB2_RXER | PB2_RXDV | PB2_TXER)
88#define PB2_PSORB1 (PB2_TXEN)
89#define PB2_DIRB0 (PB2_RXDAT | PB2_CRS | PB2_COL | PB2_RXER | PB2_RXDV)
90#define PB2_DIRB1 (PB2_TXDAT | PB2_TXEN | PB2_TXER)
91
92
93/* I/O Pin assignment for FCC3. I don't yet know the best way to do this,
94 * but there is little variation among the choices.
95 */
96#define PB3_RXDV 0x00004000U
97#define PB3_RXER 0x00008000U
98#define PB3_TXER 0x00010000U
99#define PB3_TXEN 0x00020000U
100#define PB3_COL 0x00040000U
101#define PB3_CRS 0x00080000U
102#define PB3_TXDAT 0x0f000000U
103#define PB3_RXDAT 0x00f00000U
104#define PB3_PSORB0 (PB3_RXDAT | PB3_TXDAT | PB3_CRS | PB3_COL | \
105 PB3_RXER | PB3_RXDV | PB3_TXER | PB3_TXEN)
106#define PB3_PSORB1 0
107#define PB3_DIRB0 (PB3_RXDAT | PB3_CRS | PB3_COL | PB3_RXER | PB3_RXDV)
108#define PB3_DIRB1 (PB3_TXDAT | PB3_TXEN | PB3_TXER)
109
110#define FCC_MEM_OFFSET(x) (CPM_FCC_SPECIAL_BASE + (x*128))
111#define FCC1_MEM_OFFSET FCC_MEM_OFFSET(0)
112#define FCC2_MEM_OFFSET FCC_MEM_OFFSET(1)
113
114#endif 32#endif
diff --git a/arch/ppc/syslib/Makefile b/arch/ppc/syslib/Makefile
index 2497bbc07e7..dca23f2ef85 100644
--- a/arch/ppc/syslib/Makefile
+++ b/arch/ppc/syslib/Makefile
@@ -93,7 +93,7 @@ obj-$(CONFIG_PCI) += pci_auto.o
93endif 93endif
94obj-$(CONFIG_RAPIDIO) += ppc85xx_rio.o 94obj-$(CONFIG_RAPIDIO) += ppc85xx_rio.o
95obj-$(CONFIG_83xx) += ppc83xx_setup.o ppc_sys.o \ 95obj-$(CONFIG_83xx) += ppc83xx_setup.o ppc_sys.o \
96 mpc83xx_sys.o mpc83xx_devices.o 96 mpc83xx_sys.o mpc83xx_devices.o ipic.o
97ifeq ($(CONFIG_83xx),y) 97ifeq ($(CONFIG_83xx),y)
98obj-$(CONFIG_PCI) += pci_auto.o 98obj-$(CONFIG_PCI) += pci_auto.o
99endif 99endif
diff --git a/arch/ppc/syslib/ipic.c b/arch/ppc/syslib/ipic.c
new file mode 100644
index 00000000000..46801f5ec03
--- /dev/null
+++ b/arch/ppc/syslib/ipic.c
@@ -0,0 +1,646 @@
1/*
2 * arch/ppc/syslib/ipic.c
3 *
4 * IPIC routines implementations.
5 *
6 * Copyright 2005 Freescale Semiconductor, Inc.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
12 */
13#include <linux/kernel.h>
14#include <linux/init.h>
15#include <linux/errno.h>
16#include <linux/reboot.h>
17#include <linux/slab.h>
18#include <linux/stddef.h>
19#include <linux/sched.h>
20#include <linux/signal.h>
21#include <linux/sysdev.h>
22#include <asm/irq.h>
23#include <asm/io.h>
24#include <asm/ipic.h>
25#include <asm/mpc83xx.h>
26
27#include "ipic.h"
28
29static struct ipic p_ipic;
30static struct ipic * primary_ipic;
31
32static struct ipic_info ipic_info[] = {
33 [9] = {
34 .pend = IPIC_SIPNR_H,
35 .mask = IPIC_SIMSR_H,
36 .prio = IPIC_SIPRR_D,
37 .force = IPIC_SIFCR_H,
38 .bit = 24,
39 .prio_mask = 0,
40 },
41 [10] = {
42 .pend = IPIC_SIPNR_H,
43 .mask = IPIC_SIMSR_H,
44 .prio = IPIC_SIPRR_D,
45 .force = IPIC_SIFCR_H,
46 .bit = 25,
47 .prio_mask = 1,
48 },
49 [11] = {
50 .pend = IPIC_SIPNR_H,
51 .mask = IPIC_SIMSR_H,
52 .prio = IPIC_SIPRR_D,
53 .force = IPIC_SIFCR_H,
54 .bit = 26,
55 .prio_mask = 2,
56 },
57 [14] = {
58 .pend = IPIC_SIPNR_H,
59 .mask = IPIC_SIMSR_H,
60 .prio = IPIC_SIPRR_D,
61 .force = IPIC_SIFCR_H,
62 .bit = 29,
63 .prio_mask = 5,
64 },
65 [15] = {
66 .pend = IPIC_SIPNR_H,
67 .mask = IPIC_SIMSR_H,
68 .prio = IPIC_SIPRR_D,
69 .force = IPIC_SIFCR_H,
70 .bit = 30,
71 .prio_mask = 6,
72 },
73 [16] = {
74 .pend = IPIC_SIPNR_H,
75 .mask = IPIC_SIMSR_H,
76 .prio = IPIC_SIPRR_D,
77 .force = IPIC_SIFCR_H,
78 .bit = 31,
79 .prio_mask = 7,
80 },
81 [17] = {
82 .pend = IPIC_SEPNR,
83 .mask = IPIC_SEMSR,
84 .prio = IPIC_SMPRR_A,
85 .force = IPIC_SEFCR,
86 .bit = 1,
87 .prio_mask = 5,
88 },
89 [18] = {
90 .pend = IPIC_SEPNR,
91 .mask = IPIC_SEMSR,
92 .prio = IPIC_SMPRR_A,
93 .force = IPIC_SEFCR,
94 .bit = 2,
95 .prio_mask = 6,
96 },
97 [19] = {
98 .pend = IPIC_SEPNR,
99 .mask = IPIC_SEMSR,
100 .prio = IPIC_SMPRR_A,
101 .force = IPIC_SEFCR,
102 .bit = 3,
103 .prio_mask = 7,
104 },
105 [20] = {
106 .pend = IPIC_SEPNR,
107 .mask = IPIC_SEMSR,
108 .prio = IPIC_SMPRR_B,
109 .force = IPIC_SEFCR,
110 .bit = 4,
111 .prio_mask = 4,
112 },
113 [21] = {
114 .pend = IPIC_SEPNR,
115 .mask = IPIC_SEMSR,
116 .prio = IPIC_SMPRR_B,
117 .force = IPIC_SEFCR,
118 .bit = 5,
119 .prio_mask = 5,
120 },
121 [22] = {
122 .pend = IPIC_SEPNR,
123 .mask = IPIC_SEMSR,
124 .prio = IPIC_SMPRR_B,
125 .force = IPIC_SEFCR,
126 .bit = 6,
127 .prio_mask = 6,
128 },
129 [23] = {
130 .pend = IPIC_SEPNR,
131 .mask = IPIC_SEMSR,
132 .prio = IPIC_SMPRR_B,
133 .force = IPIC_SEFCR,
134 .bit = 7,
135 .prio_mask = 7,
136 },
137 [32] = {
138 .pend = IPIC_SIPNR_H,
139 .mask = IPIC_SIMSR_H,
140 .prio = IPIC_SIPRR_A,
141 .force = IPIC_SIFCR_H,
142 .bit = 0,
143 .prio_mask = 0,
144 },
145 [33] = {
146 .pend = IPIC_SIPNR_H,
147 .mask = IPIC_SIMSR_H,
148 .prio = IPIC_SIPRR_A,
149 .force = IPIC_SIFCR_H,
150 .bit = 1,
151 .prio_mask = 1,
152 },
153 [34] = {
154 .pend = IPIC_SIPNR_H,
155 .mask = IPIC_SIMSR_H,
156 .prio = IPIC_SIPRR_A,
157 .force = IPIC_SIFCR_H,
158 .bit = 2,
159 .prio_mask = 2,
160 },
161 [35] = {
162 .pend = IPIC_SIPNR_H,
163 .mask = IPIC_SIMSR_H,
164 .prio = IPIC_SIPRR_A,
165 .force = IPIC_SIFCR_H,
166 .bit = 3,
167 .prio_mask = 3,
168 },
169 [36] = {
170 .pend = IPIC_SIPNR_H,
171 .mask = IPIC_SIMSR_H,
172 .prio = IPIC_SIPRR_A,
173 .force = IPIC_SIFCR_H,
174 .bit = 4,
175 .prio_mask = 4,
176 },
177 [37] = {
178 .pend = IPIC_SIPNR_H,
179 .mask = IPIC_SIMSR_H,
180 .prio = IPIC_SIPRR_A,
181 .force = IPIC_SIFCR_H,
182 .bit = 5,
183 .prio_mask = 5,
184 },
185 [38] = {
186 .pend = IPIC_SIPNR_H,
187 .mask = IPIC_SIMSR_H,
188 .prio = IPIC_SIPRR_A,
189 .force = IPIC_SIFCR_H,
190 .bit = 6,
191 .prio_mask = 6,
192 },
193 [39] = {
194 .pend = IPIC_SIPNR_H,
195 .mask = IPIC_SIMSR_H,
196 .prio = IPIC_SIPRR_A,
197 .force = IPIC_SIFCR_H,
198 .bit = 7,
199 .prio_mask = 7,
200 },
201 [48] = {
202 .pend = IPIC_SEPNR,
203 .mask = IPIC_SEMSR,
204 .prio = IPIC_SMPRR_A,
205 .force = IPIC_SEFCR,
206 .bit = 0,
207 .prio_mask = 4,
208 },
209 [64] = {
210 .pend = IPIC_SIPNR_H,
211 .mask = IPIC_SIMSR_L,
212 .prio = IPIC_SMPRR_A,
213 .force = IPIC_SIFCR_L,
214 .bit = 0,
215 .prio_mask = 0,
216 },
217 [65] = {
218 .pend = IPIC_SIPNR_H,
219 .mask = IPIC_SIMSR_L,
220 .prio = IPIC_SMPRR_A,
221 .force = IPIC_SIFCR_L,
222 .bit = 1,
223 .prio_mask = 1,
224 },
225 [66] = {
226 .pend = IPIC_SIPNR_H,
227 .mask = IPIC_SIMSR_L,
228 .prio = IPIC_SMPRR_A,
229 .force = IPIC_SIFCR_L,
230 .bit = 2,
231 .prio_mask = 2,
232 },
233 [67] = {
234 .pend = IPIC_SIPNR_H,
235 .mask = IPIC_SIMSR_L,
236 .prio = IPIC_SMPRR_A,
237 .force = IPIC_SIFCR_L,
238 .bit = 3,
239 .prio_mask = 3,
240 },
241 [68] = {
242 .pend = IPIC_SIPNR_H,
243 .mask = IPIC_SIMSR_L,
244 .prio = IPIC_SMPRR_B,
245 .force = IPIC_SIFCR_L,
246 .bit = 4,
247 .prio_mask = 0,
248 },
249 [69] = {
250 .pend = IPIC_SIPNR_H,
251 .mask = IPIC_SIMSR_L,
252 .prio = IPIC_SMPRR_B,
253 .force = IPIC_SIFCR_L,
254 .bit = 5,
255 .prio_mask = 1,
256 },
257 [70] = {
258 .pend = IPIC_SIPNR_H,
259 .mask = IPIC_SIMSR_L,
260 .prio = IPIC_SMPRR_B,
261 .force = IPIC_SIFCR_L,
262 .bit = 6,
263 .prio_mask = 2,
264 },
265 [71] = {
266 .pend = IPIC_SIPNR_H,
267 .mask = IPIC_SIMSR_L,
268 .prio = IPIC_SMPRR_B,
269 .force = IPIC_SIFCR_L,
270 .bit = 7,
271 .prio_mask = 3,
272 },
273 [72] = {
274 .pend = IPIC_SIPNR_H,
275 .mask = IPIC_SIMSR_L,
276 .prio = 0,
277 .force = IPIC_SIFCR_L,
278 .bit = 8,
279 },
280 [73] = {
281 .pend = IPIC_SIPNR_H,
282 .mask = IPIC_SIMSR_L,
283 .prio = 0,
284 .force = IPIC_SIFCR_L,
285 .bit = 9,
286 },
287 [74] = {
288 .pend = IPIC_SIPNR_H,
289 .mask = IPIC_SIMSR_L,
290 .prio = 0,
291 .force = IPIC_SIFCR_L,
292 .bit = 10,
293 },
294 [75] = {
295 .pend = IPIC_SIPNR_H,
296 .mask = IPIC_SIMSR_L,
297 .prio = 0,
298 .force = IPIC_SIFCR_L,
299 .bit = 11,
300 },
301 [76] = {
302 .pend = IPIC_SIPNR_H,
303 .mask = IPIC_SIMSR_L,
304 .prio = 0,
305 .force = IPIC_SIFCR_L,
306 .bit = 12,
307 },
308 [77] = {
309 .pend = IPIC_SIPNR_H,
310 .mask = IPIC_SIMSR_L,
311 .prio = 0,
312 .force = IPIC_SIFCR_L,
313 .bit = 13,
314 },
315 [78] = {
316 .pend = IPIC_SIPNR_H,
317 .mask = IPIC_SIMSR_L,
318 .prio = 0,
319 .force = IPIC_SIFCR_L,
320 .bit = 14,
321 },
322 [79] = {
323 .pend = IPIC_SIPNR_H,
324 .mask = IPIC_SIMSR_L,
325 .prio = 0,
326 .force = IPIC_SIFCR_L,
327 .bit = 15,
328 },
329 [80] = {
330 .pend = IPIC_SIPNR_H,
331 .mask = IPIC_SIMSR_L,
332 .prio = 0,
333 .force = IPIC_SIFCR_L,
334 .bit = 16,
335 },
336 [84] = {
337 .pend = IPIC_SIPNR_H,
338 .mask = IPIC_SIMSR_L,
339 .prio = 0,
340 .force = IPIC_SIFCR_L,
341 .bit = 20,
342 },
343 [85] = {
344 .pend = IPIC_SIPNR_H,
345 .mask = IPIC_SIMSR_L,
346 .prio = 0,
347 .force = IPIC_SIFCR_L,
348 .bit = 21,
349 },
350 [90] = {
351 .pend = IPIC_SIPNR_H,
352 .mask = IPIC_SIMSR_L,
353 .prio = 0,
354 .force = IPIC_SIFCR_L,
355 .bit = 26,
356 },
357 [91] = {
358 .pend = IPIC_SIPNR_H,
359 .mask = IPIC_SIMSR_L,
360 .prio = 0,
361 .force = IPIC_SIFCR_L,
362 .bit = 27,
363 },
364};
365
366static inline u32 ipic_read(volatile u32 __iomem *base, unsigned int reg)
367{
368 return in_be32(base + (reg >> 2));
369}
370
371static inline void ipic_write(volatile u32 __iomem *base, unsigned int reg, u32 value)
372{
373 out_be32(base + (reg >> 2), value);
374}
375
376static inline struct ipic * ipic_from_irq(unsigned int irq)
377{
378 return primary_ipic;
379}
380
381static void ipic_enable_irq(unsigned int irq)
382{
383 struct ipic *ipic = ipic_from_irq(irq);
384 unsigned int src = irq - ipic->irq_offset;
385 u32 temp;
386
387 temp = ipic_read(ipic->regs, ipic_info[src].mask);
388 temp |= (1 << (31 - ipic_info[src].bit));
389 ipic_write(ipic->regs, ipic_info[src].mask, temp);
390}
391
392static void ipic_disable_irq(unsigned int irq)
393{
394 struct ipic *ipic = ipic_from_irq(irq);
395 unsigned int src = irq - ipic->irq_offset;
396 u32 temp;
397
398 temp = ipic_read(ipic->regs, ipic_info[src].mask);
399 temp &= ~(1 << (31 - ipic_info[src].bit));
400 ipic_write(ipic->regs, ipic_info[src].mask, temp);
401}
402
403static void ipic_disable_irq_and_ack(unsigned int irq)
404{
405 struct ipic *ipic = ipic_from_irq(irq);
406 unsigned int src = irq - ipic->irq_offset;
407 u32 temp;
408
409 ipic_disable_irq(irq);
410
411 temp = ipic_read(ipic->regs, ipic_info[src].pend);
412 temp |= (1 << (31 - ipic_info[src].bit));
413 ipic_write(ipic->regs, ipic_info[src].pend, temp);
414}
415
416static void ipic_end_irq(unsigned int irq)
417{
418 if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
419 ipic_enable_irq(irq);
420}
421
422struct hw_interrupt_type ipic = {
423 .typename = " IPIC ",
424 .enable = ipic_enable_irq,
425 .disable = ipic_disable_irq,
426 .ack = ipic_disable_irq_and_ack,
427 .end = ipic_end_irq,
428};
429
430void __init ipic_init(phys_addr_t phys_addr,
431 unsigned int flags,
432 unsigned int irq_offset,
433 unsigned char *senses,
434 unsigned int senses_count)
435{
436 u32 i, temp = 0;
437
438 primary_ipic = &p_ipic;
439 primary_ipic->regs = ioremap(phys_addr, MPC83xx_IPIC_SIZE);
440
441 primary_ipic->irq_offset = irq_offset;
442
443 ipic_write(primary_ipic->regs, IPIC_SICNR, 0x0);
444
445 /* default priority scheme is grouped. If spread mode is required
446 * configure SICFR accordingly */
447 if (flags & IPIC_SPREADMODE_GRP_A)
448 temp |= SICFR_IPSA;
449 if (flags & IPIC_SPREADMODE_GRP_D)
450 temp |= SICFR_IPSD;
451 if (flags & IPIC_SPREADMODE_MIX_A)
452 temp |= SICFR_MPSA;
453 if (flags & IPIC_SPREADMODE_MIX_B)
454 temp |= SICFR_MPSB;
455
456 ipic_write(primary_ipic->regs, IPIC_SICNR, temp);
457
458 /* handle MCP route */
459 temp = 0;
460 if (flags & IPIC_DISABLE_MCP_OUT)
461 temp = SERCR_MCPR;
462 ipic_write(primary_ipic->regs, IPIC_SERCR, temp);
463
464 /* handle routing of IRQ0 to MCP */
465 temp = ipic_read(primary_ipic->regs, IPIC_SEMSR);
466
467 if (flags & IPIC_IRQ0_MCP)
468 temp |= SEMSR_SIRQ0;
469 else
470 temp &= ~SEMSR_SIRQ0;
471
472 ipic_write(primary_ipic->regs, IPIC_SEMSR, temp);
473
474 for (i = 0 ; i < NR_IPIC_INTS ; i++) {
475 irq_desc[i+irq_offset].chip = &ipic;
476 irq_desc[i+irq_offset].status = IRQ_LEVEL;
477 }
478
479 temp = 0;
480 for (i = 0 ; i < senses_count ; i++) {
481 if ((senses[i] & IRQ_SENSE_MASK) == IRQ_SENSE_EDGE) {
482 temp |= 1 << (15 - i);
483 if (i != 0)
484 irq_desc[i + irq_offset + MPC83xx_IRQ_EXT1 - 1].status = 0;
485 else
486 irq_desc[irq_offset + MPC83xx_IRQ_EXT0].status = 0;
487 }
488 }
489 ipic_write(primary_ipic->regs, IPIC_SECNR, temp);
490
491 printk ("IPIC (%d IRQ sources, %d External IRQs) at %p\n", NR_IPIC_INTS,
492 senses_count, primary_ipic->regs);
493}
494
495int ipic_set_priority(unsigned int irq, unsigned int priority)
496{
497 struct ipic *ipic = ipic_from_irq(irq);
498 unsigned int src = irq - ipic->irq_offset;
499 u32 temp;
500
501 if (priority > 7)
502 return -EINVAL;
503 if (src > 127)
504 return -EINVAL;
505 if (ipic_info[src].prio == 0)
506 return -EINVAL;
507
508 temp = ipic_read(ipic->regs, ipic_info[src].prio);
509
510 if (priority < 4) {
511 temp &= ~(0x7 << (20 + (3 - priority) * 3));
512 temp |= ipic_info[src].prio_mask << (20 + (3 - priority) * 3);
513 } else {
514 temp &= ~(0x7 << (4 + (7 - priority) * 3));
515 temp |= ipic_info[src].prio_mask << (4 + (7 - priority) * 3);
516 }
517
518 ipic_write(ipic->regs, ipic_info[src].prio, temp);
519
520 return 0;
521}
522
523void ipic_set_highest_priority(unsigned int irq)
524{
525 struct ipic *ipic = ipic_from_irq(irq);
526 unsigned int src = irq - ipic->irq_offset;
527 u32 temp;
528
529 temp = ipic_read(ipic->regs, IPIC_SICFR);
530
531 /* clear and set HPI */
532 temp &= 0x7f000000;
533 temp |= (src & 0x7f) << 24;
534
535 ipic_write(ipic->regs, IPIC_SICFR, temp);
536}
537
538void ipic_set_default_priority(void)
539{
540 ipic_set_priority(MPC83xx_IRQ_TSEC1_TX, 0);
541 ipic_set_priority(MPC83xx_IRQ_TSEC1_RX, 1);
542 ipic_set_priority(MPC83xx_IRQ_TSEC1_ERROR, 2);
543 ipic_set_priority(MPC83xx_IRQ_TSEC2_TX, 3);
544 ipic_set_priority(MPC83xx_IRQ_TSEC2_RX, 4);
545 ipic_set_priority(MPC83xx_IRQ_TSEC2_ERROR, 5);
546 ipic_set_priority(MPC83xx_IRQ_USB2_DR, 6);
547 ipic_set_priority(MPC83xx_IRQ_USB2_MPH, 7);
548
549 ipic_set_priority(MPC83xx_IRQ_UART1, 0);
550 ipic_set_priority(MPC83xx_IRQ_UART2, 1);
551 ipic_set_priority(MPC83xx_IRQ_SEC2, 2);
552 ipic_set_priority(MPC83xx_IRQ_IIC1, 5);
553 ipic_set_priority(MPC83xx_IRQ_IIC2, 6);
554 ipic_set_priority(MPC83xx_IRQ_SPI, 7);
555 ipic_set_priority(MPC83xx_IRQ_RTC_SEC, 0);
556 ipic_set_priority(MPC83xx_IRQ_PIT, 1);
557 ipic_set_priority(MPC83xx_IRQ_PCI1, 2);
558 ipic_set_priority(MPC83xx_IRQ_PCI2, 3);
559 ipic_set_priority(MPC83xx_IRQ_EXT0, 4);
560 ipic_set_priority(MPC83xx_IRQ_EXT1, 5);
561 ipic_set_priority(MPC83xx_IRQ_EXT2, 6);
562 ipic_set_priority(MPC83xx_IRQ_EXT3, 7);
563 ipic_set_priority(MPC83xx_IRQ_RTC_ALR, 0);
564 ipic_set_priority(MPC83xx_IRQ_MU, 1);
565 ipic_set_priority(MPC83xx_IRQ_SBA, 2);
566 ipic_set_priority(MPC83xx_IRQ_DMA, 3);
567 ipic_set_priority(MPC83xx_IRQ_EXT4, 4);
568 ipic_set_priority(MPC83xx_IRQ_EXT5, 5);
569 ipic_set_priority(MPC83xx_IRQ_EXT6, 6);
570 ipic_set_priority(MPC83xx_IRQ_EXT7, 7);
571}
572
573void ipic_enable_mcp(enum ipic_mcp_irq mcp_irq)
574{
575 struct ipic *ipic = primary_ipic;
576 u32 temp;
577
578 temp = ipic_read(ipic->regs, IPIC_SERMR);
579 temp |= (1 << (31 - mcp_irq));
580 ipic_write(ipic->regs, IPIC_SERMR, temp);
581}
582
583void ipic_disable_mcp(enum ipic_mcp_irq mcp_irq)
584{
585 struct ipic *ipic = primary_ipic;
586 u32 temp;
587
588 temp = ipic_read(ipic->regs, IPIC_SERMR);
589 temp &= (1 << (31 - mcp_irq));
590 ipic_write(ipic->regs, IPIC_SERMR, temp);
591}
592
593u32 ipic_get_mcp_status(void)
594{
595 return ipic_read(primary_ipic->regs, IPIC_SERMR);
596}
597
598void ipic_clear_mcp_status(u32 mask)
599{
600 ipic_write(primary_ipic->regs, IPIC_SERMR, mask);
601}
602
603/* Return an interrupt vector or -1 if no interrupt is pending. */
604int ipic_get_irq(struct pt_regs *regs)
605{
606 int irq;
607
608 irq = ipic_read(primary_ipic->regs, IPIC_SIVCR) & 0x7f;
609
610 if (irq == 0) /* 0 --> no irq is pending */
611 irq = -1;
612
613 return irq;
614}
615
616static struct sysdev_class ipic_sysclass = {
617 set_kset_name("ipic"),
618};
619
620static struct sys_device device_ipic = {
621 .id = 0,
622 .cls = &ipic_sysclass,
623};
624
625static int __init init_ipic_sysfs(void)
626{
627 int rc;
628
629 if (!primary_ipic->regs)
630 return -ENODEV;
631 printk(KERN_DEBUG "Registering ipic with sysfs...\n");
632
633 rc = sysdev_class_register(&ipic_sysclass);
634 if (rc) {
635 printk(KERN_ERR "Failed registering ipic sys class\n");
636 return -ENODEV;
637 }
638 rc = sysdev_register(&device_ipic);
639 if (rc) {
640 printk(KERN_ERR "Failed registering ipic sys device\n");
641 return -ENODEV;
642 }
643 return 0;
644}
645
646subsys_initcall(init_ipic_sysfs);
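[Editor's note] The densest part of the new driver is how ipic_set_priority() packs a source's 3-bit prio_mask into SIPRR/SMPRR: priorities 0-3 land in 3-bit fields between bits 20 and 31, priorities 4-7 between bits 4 and 15. A stand-alone sketch of the same shift arithmetic (illustration only, not part of the patch):

        #include <stdint.h>

        /* Return 'reg' with the 3-bit slot for 'priority' (0-7) replaced by
         * 'prio_mask', mirroring the register update in ipic_set_priority()
         * above. */
        static uint32_t ipic_prio_field(uint32_t reg, unsigned int priority,
                                        uint8_t prio_mask)
        {
                unsigned int shift = (priority < 4)
                        ? 20 + (3 - priority) * 3
                        : 4 + (7 - priority) * 3;

                reg &= ~(0x7u << shift);                /* clear the slot */
                reg |= (uint32_t)prio_mask << shift;    /* program the source */
                return reg;
        }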
diff --git a/arch/ppc/syslib/ipic.h b/arch/ppc/syslib/ipic.h
new file mode 100644
index 00000000000..a60c9d18bb7
--- /dev/null
+++ b/arch/ppc/syslib/ipic.h
@@ -0,0 +1,47 @@
1/*
2 * IPIC private definitions and structure.
3 *
4 * Maintainer: Kumar Gala <galak@kernel.crashing.org>
5 *
6 * Copyright 2005 Freescale Semiconductor, Inc
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
12 */
13#ifndef __IPIC_H__
14#define __IPIC_H__
15
16#include <asm/ipic.h>
17
18#define MPC83xx_IPIC_SIZE (0x00100)
19
20/* System Global Interrupt Configuration Register */
21#define SICFR_IPSA 0x00010000
22#define SICFR_IPSD 0x00080000
23#define SICFR_MPSA 0x00200000
24#define SICFR_MPSB 0x00400000
25
26/* System External Interrupt Mask Register */
27#define SEMSR_SIRQ0 0x00008000
28
29/* System Error Control Register */
30#define SERCR_MCPR 0x00000001
31
32struct ipic {
33 volatile u32 __iomem *regs;
34 unsigned int irq_offset;
35};
36
37struct ipic_info {
38 u8 pend; /* pending register offset from base */
39 u8 mask; /* mask register offset from base */
40 u8 prio; /* priority register offset from base */
41 u8 force; /* force register offset from base */
42 u8 bit; /* register bit position (as per doc)
43 bit mask = 1 << (31 - bit) */
44 u8 prio_mask; /* priority mask value */
45};
46
47#endif /* __IPIC_H__ */
diff --git a/arch/ppc/syslib/mpc85xx_devices.c b/arch/ppc/syslib/mpc85xx_devices.c
index 7735336f5b8..325136e5aee 100644
--- a/arch/ppc/syslib/mpc85xx_devices.c
+++ b/arch/ppc/syslib/mpc85xx_devices.c
@@ -16,9 +16,11 @@
16#include <linux/device.h> 16#include <linux/device.h>
17#include <linux/serial_8250.h> 17#include <linux/serial_8250.h>
18#include <linux/fsl_devices.h> 18#include <linux/fsl_devices.h>
19#include <linux/fs_enet_pd.h>
19#include <asm/mpc85xx.h> 20#include <asm/mpc85xx.h>
20#include <asm/irq.h> 21#include <asm/irq.h>
21#include <asm/ppc_sys.h> 22#include <asm/ppc_sys.h>
23#include <asm/cpm2.h>
22 24
23/* We use offsets for IORESOURCE_MEM since we do not know at compile time 25/* We use offsets for IORESOURCE_MEM since we do not know at compile time
24 * what CCSRBAR is, will get fixed up by mach_mpc85xx_fixup 26 * what CCSRBAR is, will get fixed up by mach_mpc85xx_fixup
@@ -82,6 +84,60 @@ static struct fsl_i2c_platform_data mpc85xx_fsl_i2c2_pdata = {
82 .device_flags = FSL_I2C_DEV_SEPARATE_DFSRR, 84 .device_flags = FSL_I2C_DEV_SEPARATE_DFSRR,
83}; 85};
84 86
87static struct fs_platform_info mpc85xx_fcc1_pdata = {
88 .fs_no = fsid_fcc1,
89 .cp_page = CPM_CR_FCC1_PAGE,
90 .cp_block = CPM_CR_FCC1_SBLOCK,
91
92 .rx_ring = 32,
93 .tx_ring = 32,
94 .rx_copybreak = 240,
95 .use_napi = 0,
96 .napi_weight = 17,
97
98 .clk_mask = CMX1_CLK_MASK,
99 .clk_route = CMX1_CLK_ROUTE,
100 .clk_trx = (PC_F1RXCLK | PC_F1TXCLK),
101
102 .mem_offset = FCC1_MEM_OFFSET,
103};
104
105static struct fs_platform_info mpc85xx_fcc2_pdata = {
106 .fs_no = fsid_fcc2,
107 .cp_page = CPM_CR_FCC2_PAGE,
108 .cp_block = CPM_CR_FCC2_SBLOCK,
109
110 .rx_ring = 32,
111 .tx_ring = 32,
112 .rx_copybreak = 240,
113 .use_napi = 0,
114 .napi_weight = 17,
115
116 .clk_mask = CMX2_CLK_MASK,
117 .clk_route = CMX2_CLK_ROUTE,
118 .clk_trx = (PC_F2RXCLK | PC_F2TXCLK),
119
120 .mem_offset = FCC2_MEM_OFFSET,
121};
122
123static struct fs_platform_info mpc85xx_fcc3_pdata = {
124 .fs_no = fsid_fcc3,
125 .cp_page = CPM_CR_FCC3_PAGE,
126 .cp_block = CPM_CR_FCC3_SBLOCK,
127
128 .rx_ring = 32,
129 .tx_ring = 32,
130 .rx_copybreak = 240,
131 .use_napi = 0,
132 .napi_weight = 17,
133
134 .clk_mask = CMX3_CLK_MASK,
135 .clk_route = CMX3_CLK_ROUTE,
136 .clk_trx = (PC_F3RXCLK | PC_F3TXCLK),
137
138 .mem_offset = FCC3_MEM_OFFSET,
139};
140
85static struct plat_serial8250_port serial_platform_data[] = { 141static struct plat_serial8250_port serial_platform_data[] = {
86 [0] = { 142 [0] = {
87 .mapbase = 0x4500, 143 .mapbase = 0x4500,
@@ -318,19 +374,28 @@ struct platform_device ppc_sys_platform_devices[] = {
318 [MPC85xx_CPM_FCC1] = { 374 [MPC85xx_CPM_FCC1] = {
319 .name = "fsl-cpm-fcc", 375 .name = "fsl-cpm-fcc",
320 .id = 1, 376 .id = 1,
321 .num_resources = 3, 377 .num_resources = 4,
378 .dev.platform_data = &mpc85xx_fcc1_pdata,
322 .resource = (struct resource[]) { 379 .resource = (struct resource[]) {
323 { 380 {
381 .name = "fcc_regs",
324 .start = 0x91300, 382 .start = 0x91300,
325 .end = 0x9131F, 383 .end = 0x9131F,
326 .flags = IORESOURCE_MEM, 384 .flags = IORESOURCE_MEM,
327 }, 385 },
328 { 386 {
387 .name = "fcc_regs_c",
329 .start = 0x91380, 388 .start = 0x91380,
330 .end = 0x9139F, 389 .end = 0x9139F,
331 .flags = IORESOURCE_MEM, 390 .flags = IORESOURCE_MEM,
332 }, 391 },
333 { 392 {
393 .name = "fcc_pram",
394 .start = 0x88400,
395 .end = 0x884ff,
396 .flags = IORESOURCE_MEM,
397 },
398 {
334 .start = SIU_INT_FCC1, 399 .start = SIU_INT_FCC1,
335 .end = SIU_INT_FCC1, 400 .end = SIU_INT_FCC1,
336 .flags = IORESOURCE_IRQ, 401 .flags = IORESOURCE_IRQ,
@@ -340,19 +405,28 @@ struct platform_device ppc_sys_platform_devices[] = {
340 [MPC85xx_CPM_FCC2] = { 405 [MPC85xx_CPM_FCC2] = {
341 .name = "fsl-cpm-fcc", 406 .name = "fsl-cpm-fcc",
342 .id = 2, 407 .id = 2,
343 .num_resources = 3, 408 .num_resources = 4,
409 .dev.platform_data = &mpc85xx_fcc2_pdata,
344 .resource = (struct resource[]) { 410 .resource = (struct resource[]) {
345 { 411 {
412 .name = "fcc_regs",
346 .start = 0x91320, 413 .start = 0x91320,
347 .end = 0x9133F, 414 .end = 0x9133F,
348 .flags = IORESOURCE_MEM, 415 .flags = IORESOURCE_MEM,
349 }, 416 },
350 { 417 {
418 .name = "fcc_regs_c",
351 .start = 0x913A0, 419 .start = 0x913A0,
352 .end = 0x913CF, 420 .end = 0x913CF,
353 .flags = IORESOURCE_MEM, 421 .flags = IORESOURCE_MEM,
354 }, 422 },
355 { 423 {
424 .name = "fcc_pram",
425 .start = 0x88500,
426 .end = 0x885ff,
427 .flags = IORESOURCE_MEM,
428 },
429 {
356 .start = SIU_INT_FCC2, 430 .start = SIU_INT_FCC2,
357 .end = SIU_INT_FCC2, 431 .end = SIU_INT_FCC2,
358 .flags = IORESOURCE_IRQ, 432 .flags = IORESOURCE_IRQ,
@@ -362,19 +436,28 @@ struct platform_device ppc_sys_platform_devices[] = {
362 [MPC85xx_CPM_FCC3] = { 436 [MPC85xx_CPM_FCC3] = {
363 .name = "fsl-cpm-fcc", 437 .name = "fsl-cpm-fcc",
364 .id = 3, 438 .id = 3,
365 .num_resources = 3, 439 .num_resources = 4,
440 .dev.platform_data = &mpc85xx_fcc3_pdata,
366 .resource = (struct resource[]) { 441 .resource = (struct resource[]) {
367 { 442 {
443 .name = "fcc_regs",
368 .start = 0x91340, 444 .start = 0x91340,
369 .end = 0x9135F, 445 .end = 0x9135F,
370 .flags = IORESOURCE_MEM, 446 .flags = IORESOURCE_MEM,
371 }, 447 },
372 { 448 {
449 .name = "fcc_regs_c",
373 .start = 0x913D0, 450 .start = 0x913D0,
374 .end = 0x913FF, 451 .end = 0x913FF,
375 .flags = IORESOURCE_MEM, 452 .flags = IORESOURCE_MEM,
376 }, 453 },
377 { 454 {
455 .name = "fcc_pram",
456 .start = 0x88600,
457 .end = 0x886ff,
458 .flags = IORESOURCE_MEM,
459 },
460 {
378 .start = SIU_INT_FCC3, 461 .start = SIU_INT_FCC3,
379 .end = SIU_INT_FCC3, 462 .end = SIU_INT_FCC3,
380 .flags = IORESOURCE_IRQ, 463 .flags = IORESOURCE_IRQ,
diff --git a/arch/ppc/syslib/mpc8xx_devices.c b/arch/ppc/syslib/mpc8xx_devices.c
index 6f536383866..cf5ab47487a 100644
--- a/arch/ppc/syslib/mpc8xx_devices.c
+++ b/arch/ppc/syslib/mpc8xx_devices.c
@@ -218,6 +218,14 @@ struct platform_device ppc_sys_platform_devices[] = {
218 }, 218 },
219 }, 219 },
220 }, 220 },
221
222 [MPC8xx_MDIO_FEC] = {
223 .name = "fsl-cpm-fec-mdio",
224 .id = 0,
225 .num_resources = 0,
226
227 },
228
221}; 229};
222 230
223static int __init mach_mpc8xx_fixup(struct platform_device *pdev) 231static int __init mach_mpc8xx_fixup(struct platform_device *pdev)
diff --git a/arch/ppc/syslib/mpc8xx_sys.c b/arch/ppc/syslib/mpc8xx_sys.c
index eee21328485..18ba1d7ff9f 100644
--- a/arch/ppc/syslib/mpc8xx_sys.c
+++ b/arch/ppc/syslib/mpc8xx_sys.c
@@ -22,7 +22,7 @@ struct ppc_sys_spec ppc_sys_specs[] = {
22 .ppc_sys_name = "MPC86X", 22 .ppc_sys_name = "MPC86X",
23 .mask = 0xFFFFFFFF, 23 .mask = 0xFFFFFFFF,
24 .value = 0x00000000, 24 .value = 0x00000000,
25 .num_devices = 7, 25 .num_devices = 8,
26 .device_list = (enum ppc_sys_devices[]) 26 .device_list = (enum ppc_sys_devices[])
27 { 27 {
28 MPC8xx_CPM_FEC1, 28 MPC8xx_CPM_FEC1,
@@ -32,13 +32,14 @@ struct ppc_sys_spec ppc_sys_specs[] = {
32 MPC8xx_CPM_SCC4, 32 MPC8xx_CPM_SCC4,
33 MPC8xx_CPM_SMC1, 33 MPC8xx_CPM_SMC1,
34 MPC8xx_CPM_SMC2, 34 MPC8xx_CPM_SMC2,
35 MPC8xx_MDIO_FEC,
35 }, 36 },
36 }, 37 },
37 { 38 {
38 .ppc_sys_name = "MPC885", 39 .ppc_sys_name = "MPC885",
39 .mask = 0xFFFFFFFF, 40 .mask = 0xFFFFFFFF,
40 .value = 0x00000000, 41 .value = 0x00000000,
41 .num_devices = 8, 42 .num_devices = 9,
42 .device_list = (enum ppc_sys_devices[]) 43 .device_list = (enum ppc_sys_devices[])
43 { 44 {
44 MPC8xx_CPM_FEC1, 45 MPC8xx_CPM_FEC1,
@@ -49,6 +50,7 @@ struct ppc_sys_spec ppc_sys_specs[] = {
49 MPC8xx_CPM_SCC4, 50 MPC8xx_CPM_SCC4,
50 MPC8xx_CPM_SMC1, 51 MPC8xx_CPM_SMC1,
51 MPC8xx_CPM_SMC2, 52 MPC8xx_CPM_SMC2,
53 MPC8xx_MDIO_FEC,
52 }, 54 },
53 }, 55 },
54 { /* default match */ 56 { /* default match */
diff --git a/arch/ppc/syslib/pq2_devices.c b/arch/ppc/syslib/pq2_devices.c
index 8692d00c08c..fefbc217a56 100644
--- a/arch/ppc/syslib/pq2_devices.c
+++ b/arch/ppc/syslib/pq2_devices.c
@@ -369,6 +369,11 @@ struct platform_device ppc_sys_platform_devices[] = {
369 }, 369 },
370 }, 370 },
371 }, 371 },
372 [MPC82xx_MDIO_BB] = {
373 .name = "fsl-bb-mdio",
374 .id = 0,
375 .num_resources = 0,
376 },
372}; 377};
373 378
374static int __init mach_mpc82xx_fixup(struct platform_device *pdev) 379static int __init mach_mpc82xx_fixup(struct platform_device *pdev)
diff --git a/arch/ppc/syslib/pq2_sys.c b/arch/ppc/syslib/pq2_sys.c
index fee8948162b..f52600c0db2 100644
--- a/arch/ppc/syslib/pq2_sys.c
+++ b/arch/ppc/syslib/pq2_sys.c
@@ -139,13 +139,14 @@ struct ppc_sys_spec ppc_sys_specs[] = {
139 .ppc_sys_name = "8272", 139 .ppc_sys_name = "8272",
140 .mask = 0x0000ff00, 140 .mask = 0x0000ff00,
141 .value = 0x00000c00, 141 .value = 0x00000c00,
142 .num_devices = 12, 142 .num_devices = 13,
143 .device_list = (enum ppc_sys_devices[]) 143 .device_list = (enum ppc_sys_devices[])
144 { 144 {
145 MPC82xx_CPM_FCC1, MPC82xx_CPM_FCC2, MPC82xx_CPM_SCC1, 145 MPC82xx_CPM_FCC1, MPC82xx_CPM_FCC2, MPC82xx_CPM_SCC1,
146 MPC82xx_CPM_SCC2, MPC82xx_CPM_SCC3, MPC82xx_CPM_SCC4, 146 MPC82xx_CPM_SCC2, MPC82xx_CPM_SCC3, MPC82xx_CPM_SCC4,
147 MPC82xx_CPM_SMC1, MPC82xx_CPM_SMC2, MPC82xx_CPM_SPI, 147 MPC82xx_CPM_SMC1, MPC82xx_CPM_SMC2, MPC82xx_CPM_SPI,
148 MPC82xx_CPM_I2C, MPC82xx_CPM_USB, MPC82xx_SEC1, 148 MPC82xx_CPM_I2C, MPC82xx_CPM_USB, MPC82xx_SEC1,
149 MPC82xx_MDIO_BB,
149 }, 150 },
150 }, 151 },
151 /* below is a list of the 8280 family of processors */ 152 /* below is a list of the 8280 family of processors */
diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c
index 6a4b5f9715c..a0a94e0ef8d 100644
--- a/arch/s390/appldata/appldata_base.c
+++ b/arch/s390/appldata/appldata_base.c
@@ -618,7 +618,7 @@ appldata_offline_cpu(int cpu)
618} 618}
619 619
620#ifdef CONFIG_HOTPLUG_CPU 620#ifdef CONFIG_HOTPLUG_CPU
621static int 621static int __cpuinit
622appldata_cpu_notify(struct notifier_block *self, 622appldata_cpu_notify(struct notifier_block *self,
623 unsigned long action, void *hcpu) 623 unsigned long action, void *hcpu)
624{ 624{
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index eb6ebfef134..6e6b6de7777 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -129,7 +129,7 @@ void __init paging_init(void)
129 /* 129 /*
130 * pg_table is physical at this point 130 * pg_table is physical at this point
131 */ 131 */
132 pg_table = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE); 132 pg_table = (pte_t *) alloc_bootmem_pages(PAGE_SIZE);
133 133
134 pg_dir->pgd0 = (_PAGE_TABLE | __pa(pg_table)); 134 pg_dir->pgd0 = (_PAGE_TABLE | __pa(pg_table));
135 pg_dir->pgd1 = (_PAGE_TABLE | (__pa(pg_table)+1024)); 135 pg_dir->pgd1 = (_PAGE_TABLE | (__pa(pg_table)+1024));
@@ -219,7 +219,7 @@ void __init paging_init(void)
219 continue; 219 continue;
220 } 220 }
221 221
222 pm_dir = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE*4); 222 pm_dir = (pmd_t *) alloc_bootmem_pages(PAGE_SIZE * 4);
223 pgd_populate(&init_mm, pg_dir, pm_dir); 223 pgd_populate(&init_mm, pg_dir, pm_dir);
224 224
225 for (j = 0 ; j < PTRS_PER_PMD ; j++,pm_dir++) { 225 for (j = 0 ; j < PTRS_PER_PMD ; j++,pm_dir++) {
@@ -228,7 +228,7 @@ void __init paging_init(void)
228 continue; 228 continue;
229 } 229 }
230 230
231 pt_dir = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE); 231 pt_dir = (pte_t *) alloc_bootmem_pages(PAGE_SIZE);
232 pmd_populate_kernel(&init_mm, pm_dir, pt_dir); 232 pmd_populate_kernel(&init_mm, pm_dir, pt_dir);
233 233
234 for (k = 0 ; k < PTRS_PER_PTE ; k++,pt_dir++) { 234 for (k = 0 ; k < PTRS_PER_PTE ; k++,pt_dir++) {
diff --git a/arch/sparc/kernel/setup.c b/arch/sparc/kernel/setup.c
index 35488d6c745..0251cab4708 100644
--- a/arch/sparc/kernel/setup.c
+++ b/arch/sparc/kernel/setup.c
@@ -348,9 +348,9 @@ void __init setup_arch(char **cmdline_p)
348 init_mm.context = (unsigned long) NO_CONTEXT; 348 init_mm.context = (unsigned long) NO_CONTEXT;
349 init_task.thread.kregs = &fake_swapper_regs; 349 init_task.thread.kregs = &fake_swapper_regs;
350 350
351 smp_setup_cpu_possible_map();
352
353 paging_init(); 351 paging_init();
352
353 smp_setup_cpu_possible_map();
354} 354}
355 355
356static int __init set_preferred_console(void) 356static int __init set_preferred_console(void)
diff --git a/arch/sparc/kernel/smp.c b/arch/sparc/kernel/smp.c
index e311ade1b49..276f22881d0 100644
--- a/arch/sparc/kernel/smp.c
+++ b/arch/sparc/kernel/smp.c
@@ -34,7 +34,6 @@
34#include <asm/tlbflush.h> 34#include <asm/tlbflush.h>
35#include <asm/cpudata.h> 35#include <asm/cpudata.h>
36 36
37volatile int smp_processors_ready = 0;
38int smp_num_cpus = 1; 37int smp_num_cpus = 1;
39volatile unsigned long cpu_callin_map[NR_CPUS] __initdata = {0,}; 38volatile unsigned long cpu_callin_map[NR_CPUS] __initdata = {0,};
40unsigned char boot_cpu_id = 0; 39unsigned char boot_cpu_id = 0;
diff --git a/arch/sparc/kernel/sun4d_smp.c b/arch/sparc/kernel/sun4d_smp.c
index ba843f6a283..3ff4edd3281 100644
--- a/arch/sparc/kernel/sun4d_smp.c
+++ b/arch/sparc/kernel/sun4d_smp.c
@@ -42,7 +42,7 @@ extern ctxd_t *srmmu_ctx_table_phys;
42 42
43extern void calibrate_delay(void); 43extern void calibrate_delay(void);
44 44
45extern volatile int smp_processors_ready; 45static volatile int smp_processors_ready = 0;
46static int smp_highest_cpu; 46static int smp_highest_cpu;
47extern volatile unsigned long cpu_callin_map[NR_CPUS]; 47extern volatile unsigned long cpu_callin_map[NR_CPUS];
48extern cpuinfo_sparc cpu_data[NR_CPUS]; 48extern cpuinfo_sparc cpu_data[NR_CPUS];
diff --git a/arch/sparc/kernel/sun4m_smp.c b/arch/sparc/kernel/sun4m_smp.c
index 3b32096134a..7d4a649138f 100644
--- a/arch/sparc/kernel/sun4m_smp.c
+++ b/arch/sparc/kernel/sun4m_smp.c
@@ -39,7 +39,6 @@ extern ctxd_t *srmmu_ctx_table_phys;
39 39
40extern void calibrate_delay(void); 40extern void calibrate_delay(void);
41 41
42extern volatile int smp_processors_ready;
43extern volatile unsigned long cpu_callin_map[NR_CPUS]; 42extern volatile unsigned long cpu_callin_map[NR_CPUS];
44extern unsigned char boot_cpu_id; 43extern unsigned char boot_cpu_id;
45 44
@@ -217,7 +216,6 @@ void __init smp4m_smp_done(void)
217 } 216 }
218 217
219 /* Ok, they are spinning and ready to go. */ 218 /* Ok, they are spinning and ready to go. */
220 smp_processors_ready = 1;
221} 219}
222 220
223/* At each hardware IRQ, we get this called to forward IRQ reception 221/* At each hardware IRQ, we get this called to forward IRQ reception
diff --git a/arch/sparc64/mm/generic.c b/arch/sparc64/mm/generic.c
index 8cb06205d26..af9d81db0b3 100644
--- a/arch/sparc64/mm/generic.c
+++ b/arch/sparc64/mm/generic.c
@@ -69,6 +69,8 @@ static inline void io_remap_pte_range(struct mm_struct *mm, pte_t * pte,
69 } else 69 } else
70 offset += PAGE_SIZE; 70 offset += PAGE_SIZE;
71 71
72 if (pte_write(entry))
73 entry = pte_mkdirty(entry);
72 do { 74 do {
73 BUG_ON(!pte_none(*pte)); 75 BUG_ON(!pte_none(*pte));
74 set_pte_at(mm, address, pte, entry); 76 set_pte_at(mm, address, pte, entry);
diff --git a/arch/x86_64/defconfig b/arch/x86_64/defconfig
index 840d5d93d5c..5fb97071594 100644
--- a/arch/x86_64/defconfig
+++ b/arch/x86_64/defconfig
@@ -1,7 +1,7 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.18-rc2 3# Linux kernel version: 2.6.18-rc4
4# Tue Jul 18 17:13:20 2006 4# Thu Aug 24 21:05:55 2006
5# 5#
6CONFIG_X86_64=y 6CONFIG_X86_64=y
7CONFIG_64BIT=y 7CONFIG_64BIT=y
@@ -201,7 +201,7 @@ CONFIG_ACPI_THERMAL=y
201CONFIG_ACPI_NUMA=y 201CONFIG_ACPI_NUMA=y
202# CONFIG_ACPI_ASUS is not set 202# CONFIG_ACPI_ASUS is not set
203# CONFIG_ACPI_IBM is not set 203# CONFIG_ACPI_IBM is not set
204CONFIG_ACPI_TOSHIBA=y 204# CONFIG_ACPI_TOSHIBA is not set
205CONFIG_ACPI_BLACKLIST_YEAR=0 205CONFIG_ACPI_BLACKLIST_YEAR=0
206# CONFIG_ACPI_DEBUG is not set 206# CONFIG_ACPI_DEBUG is not set
207CONFIG_ACPI_EC=y 207CONFIG_ACPI_EC=y
@@ -216,7 +216,7 @@ CONFIG_ACPI_CONTAINER=y
216# 216#
217CONFIG_CPU_FREQ=y 217CONFIG_CPU_FREQ=y
218CONFIG_CPU_FREQ_TABLE=y 218CONFIG_CPU_FREQ_TABLE=y
219# CONFIG_CPU_FREQ_DEBUG is not set 219CONFIG_CPU_FREQ_DEBUG=y
220CONFIG_CPU_FREQ_STAT=y 220CONFIG_CPU_FREQ_STAT=y
221# CONFIG_CPU_FREQ_STAT_DETAILS is not set 221# CONFIG_CPU_FREQ_STAT_DETAILS is not set
222CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y 222CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y
@@ -495,8 +495,9 @@ CONFIG_SCSI=y
495CONFIG_BLK_DEV_SD=y 495CONFIG_BLK_DEV_SD=y
496# CONFIG_CHR_DEV_ST is not set 496# CONFIG_CHR_DEV_ST is not set
497# CONFIG_CHR_DEV_OSST is not set 497# CONFIG_CHR_DEV_OSST is not set
498# CONFIG_BLK_DEV_SR is not set 498CONFIG_BLK_DEV_SR=y
499# CONFIG_CHR_DEV_SG is not set 499# CONFIG_BLK_DEV_SR_VENDOR is not set
500CONFIG_CHR_DEV_SG=y
500# CONFIG_CHR_DEV_SCH is not set 501# CONFIG_CHR_DEV_SCH is not set
501 502
502# 503#
@@ -512,7 +513,7 @@ CONFIG_SCSI_CONSTANTS=y
512CONFIG_SCSI_SPI_ATTRS=y 513CONFIG_SCSI_SPI_ATTRS=y
513CONFIG_SCSI_FC_ATTRS=y 514CONFIG_SCSI_FC_ATTRS=y
514# CONFIG_SCSI_ISCSI_ATTRS is not set 515# CONFIG_SCSI_ISCSI_ATTRS is not set
515# CONFIG_SCSI_SAS_ATTRS is not set 516CONFIG_SCSI_SAS_ATTRS=y
516 517
517# 518#
518# SCSI low-level drivers 519# SCSI low-level drivers
@@ -538,7 +539,7 @@ CONFIG_MEGARAID_MAILBOX=y
538CONFIG_MEGARAID_SAS=y 539CONFIG_MEGARAID_SAS=y
539CONFIG_SCSI_SATA=y 540CONFIG_SCSI_SATA=y
540CONFIG_SCSI_SATA_AHCI=y 541CONFIG_SCSI_SATA_AHCI=y
541# CONFIG_SCSI_SATA_SVW is not set 542CONFIG_SCSI_SATA_SVW=y
542CONFIG_SCSI_ATA_PIIX=y 543CONFIG_SCSI_ATA_PIIX=y
543# CONFIG_SCSI_SATA_MV is not set 544# CONFIG_SCSI_SATA_MV is not set
544CONFIG_SCSI_SATA_NV=y 545CONFIG_SCSI_SATA_NV=y
@@ -589,7 +590,7 @@ CONFIG_BLK_DEV_DM=y
589CONFIG_FUSION=y 590CONFIG_FUSION=y
590CONFIG_FUSION_SPI=y 591CONFIG_FUSION_SPI=y
591# CONFIG_FUSION_FC is not set 592# CONFIG_FUSION_FC is not set
592# CONFIG_FUSION_SAS is not set 593CONFIG_FUSION_SAS=y
593CONFIG_FUSION_MAX_SGE=128 594CONFIG_FUSION_MAX_SGE=128
594# CONFIG_FUSION_CTL is not set 595# CONFIG_FUSION_CTL is not set
595 596
@@ -675,7 +676,7 @@ CONFIG_NET_PCI=y
675# CONFIG_PCNET32 is not set 676# CONFIG_PCNET32 is not set
676# CONFIG_AMD8111_ETH is not set 677# CONFIG_AMD8111_ETH is not set
677# CONFIG_ADAPTEC_STARFIRE is not set 678# CONFIG_ADAPTEC_STARFIRE is not set
678# CONFIG_B44 is not set 679CONFIG_B44=y
679CONFIG_FORCEDETH=y 680CONFIG_FORCEDETH=y
680# CONFIG_DGRS is not set 681# CONFIG_DGRS is not set
681# CONFIG_EEPRO100 is not set 682# CONFIG_EEPRO100 is not set
@@ -712,7 +713,7 @@ CONFIG_E1000=y
712# CONFIG_SK98LIN is not set 713# CONFIG_SK98LIN is not set
713# CONFIG_VIA_VELOCITY is not set 714# CONFIG_VIA_VELOCITY is not set
714CONFIG_TIGON3=y 715CONFIG_TIGON3=y
715# CONFIG_BNX2 is not set 716CONFIG_BNX2=y
716 717
717# 718#
718# Ethernet (10000 Mbit) 719# Ethernet (10000 Mbit)
@@ -842,44 +843,7 @@ CONFIG_LEGACY_PTY_COUNT=256
842# 843#
843# Watchdog Cards 844# Watchdog Cards
844# 845#
845CONFIG_WATCHDOG=y 846# CONFIG_WATCHDOG is not set
846# CONFIG_WATCHDOG_NOWAYOUT is not set
847
848#
849# Watchdog Device Drivers
850#
851CONFIG_SOFT_WATCHDOG=y
852# CONFIG_ACQUIRE_WDT is not set
853# CONFIG_ADVANTECH_WDT is not set
854# CONFIG_ALIM1535_WDT is not set
855# CONFIG_ALIM7101_WDT is not set
856# CONFIG_SC520_WDT is not set
857# CONFIG_EUROTECH_WDT is not set
858# CONFIG_IB700_WDT is not set
859# CONFIG_IBMASR is not set
860# CONFIG_WAFER_WDT is not set
861# CONFIG_I6300ESB_WDT is not set
862# CONFIG_I8XX_TCO is not set
863# CONFIG_SC1200_WDT is not set
864# CONFIG_60XX_WDT is not set
865# CONFIG_SBC8360_WDT is not set
866# CONFIG_CPU5_WDT is not set
867# CONFIG_W83627HF_WDT is not set
868# CONFIG_W83877F_WDT is not set
869# CONFIG_W83977F_WDT is not set
870# CONFIG_MACHZ_WDT is not set
871# CONFIG_SBC_EPX_C3_WATCHDOG is not set
872
873#
874# PCI-based Watchdog Cards
875#
876# CONFIG_PCIPCWATCHDOG is not set
877# CONFIG_WDTPCI is not set
878
879#
880# USB-based Watchdog Cards
881#
882# CONFIG_USBPCWATCHDOG is not set
883CONFIG_HW_RANDOM=y 847CONFIG_HW_RANDOM=y
884CONFIG_HW_RANDOM_INTEL=y 848CONFIG_HW_RANDOM_INTEL=y
885CONFIG_HW_RANDOM_AMD=y 849CONFIG_HW_RANDOM_AMD=y
@@ -1056,6 +1020,7 @@ CONFIG_VGACON_SOFT_SCROLLBACK=y
1056CONFIG_VGACON_SOFT_SCROLLBACK_SIZE=256 1020CONFIG_VGACON_SOFT_SCROLLBACK_SIZE=256
1057CONFIG_VIDEO_SELECT=y 1021CONFIG_VIDEO_SELECT=y
1058CONFIG_DUMMY_CONSOLE=y 1022CONFIG_DUMMY_CONSOLE=y
1023# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
1059 1024
1060# 1025#
1061# Sound 1026# Sound
@@ -1301,7 +1266,7 @@ CONFIG_INOTIFY=y
1301CONFIG_INOTIFY_USER=y 1266CONFIG_INOTIFY_USER=y
1302# CONFIG_QUOTA is not set 1267# CONFIG_QUOTA is not set
1303CONFIG_DNOTIFY=y 1268CONFIG_DNOTIFY=y
1304CONFIG_AUTOFS_FS=y 1269# CONFIG_AUTOFS_FS is not set
1305CONFIG_AUTOFS4_FS=y 1270CONFIG_AUTOFS4_FS=y
1306# CONFIG_FUSE_FS is not set 1271# CONFIG_FUSE_FS is not set
1307 1272
@@ -1494,4 +1459,5 @@ CONFIG_DEBUG_STACKOVERFLOW=y
1494# CONFIG_CRC16 is not set 1459# CONFIG_CRC16 is not set
1495CONFIG_CRC32=y 1460CONFIG_CRC32=y
1496# CONFIG_LIBCRC32C is not set 1461# CONFIG_LIBCRC32C is not set
1462CONFIG_ZLIB_INFLATE=y
1497CONFIG_PLIST=y 1463CONFIG_PLIST=y
diff --git a/arch/x86_64/ia32/ia32_binfmt.c b/arch/x86_64/ia32/ia32_binfmt.c
index a9dc0f3b5b5..2fd5a67fd43 100644
--- a/arch/x86_64/ia32/ia32_binfmt.c
+++ b/arch/x86_64/ia32/ia32_binfmt.c
@@ -73,39 +73,44 @@ typedef elf_greg_t elf_gregset_t[ELF_NGREG];
73 * Dumping its extra ELF program headers includes all the other information 73 * Dumping its extra ELF program headers includes all the other information
74 * a debugger needs to easily find how the vsyscall DSO was being used. 74 * a debugger needs to easily find how the vsyscall DSO was being used.
75 */ 75 */
76#define ELF_CORE_EXTRA_PHDRS (VSYSCALL32_EHDR->e_phnum) 76#define ELF_CORE_EXTRA_PHDRS (find_vma(current->mm, VSYSCALL32_BASE) ? \
77 (VSYSCALL32_EHDR->e_phnum) : 0)
77#define ELF_CORE_WRITE_EXTRA_PHDRS \ 78#define ELF_CORE_WRITE_EXTRA_PHDRS \
78do { \ 79do { \
79 const struct elf32_phdr *const vsyscall_phdrs = \ 80 if (find_vma(current->mm, VSYSCALL32_BASE)) { \
80 (const struct elf32_phdr *) (VSYSCALL32_BASE \ 81 const struct elf32_phdr *const vsyscall_phdrs = \
81 + VSYSCALL32_EHDR->e_phoff); \ 82 (const struct elf32_phdr *) (VSYSCALL32_BASE \
82 int i; \ 83 + VSYSCALL32_EHDR->e_phoff);\
83 Elf32_Off ofs = 0; \ 84 int i; \
84 for (i = 0; i < VSYSCALL32_EHDR->e_phnum; ++i) { \ 85 Elf32_Off ofs = 0; \
85 struct elf32_phdr phdr = vsyscall_phdrs[i]; \ 86 for (i = 0; i < VSYSCALL32_EHDR->e_phnum; ++i) { \
86 if (phdr.p_type == PT_LOAD) { \ 87 struct elf32_phdr phdr = vsyscall_phdrs[i]; \
87 BUG_ON(ofs != 0); \ 88 if (phdr.p_type == PT_LOAD) { \
88 ofs = phdr.p_offset = offset; \ 89 BUG_ON(ofs != 0); \
89 phdr.p_memsz = PAGE_ALIGN(phdr.p_memsz); \ 90 ofs = phdr.p_offset = offset; \
90 phdr.p_filesz = phdr.p_memsz; \ 91 phdr.p_memsz = PAGE_ALIGN(phdr.p_memsz); \
91 offset += phdr.p_filesz; \ 92 phdr.p_filesz = phdr.p_memsz; \
93 offset += phdr.p_filesz; \
94 } \
95 else \
96 phdr.p_offset += ofs; \
97 phdr.p_paddr = 0; /* match other core phdrs */ \
98 DUMP_WRITE(&phdr, sizeof(phdr)); \
92 } \ 99 } \
93 else \
94 phdr.p_offset += ofs; \
95 phdr.p_paddr = 0; /* match other core phdrs */ \
96 DUMP_WRITE(&phdr, sizeof(phdr)); \
97 } \ 100 } \
98} while (0) 101} while (0)
99#define ELF_CORE_WRITE_EXTRA_DATA \ 102#define ELF_CORE_WRITE_EXTRA_DATA \
100do { \ 103do { \
101 const struct elf32_phdr *const vsyscall_phdrs = \ 104 if (find_vma(current->mm, VSYSCALL32_BASE)) { \
102 (const struct elf32_phdr *) (VSYSCALL32_BASE \ 105 const struct elf32_phdr *const vsyscall_phdrs = \
103 + VSYSCALL32_EHDR->e_phoff); \ 106 (const struct elf32_phdr *) (VSYSCALL32_BASE \
104 int i; \ 107 + VSYSCALL32_EHDR->e_phoff); \
105 for (i = 0; i < VSYSCALL32_EHDR->e_phnum; ++i) { \ 108 int i; \
106 if (vsyscall_phdrs[i].p_type == PT_LOAD) \ 109 for (i = 0; i < VSYSCALL32_EHDR->e_phnum; ++i) { \
107 DUMP_WRITE((void *) (u64) vsyscall_phdrs[i].p_vaddr, \ 110 if (vsyscall_phdrs[i].p_type == PT_LOAD) \
108 PAGE_ALIGN(vsyscall_phdrs[i].p_memsz)); \ 111 DUMP_WRITE((void *) (u64) vsyscall_phdrs[i].p_vaddr,\
112 PAGE_ALIGN(vsyscall_phdrs[i].p_memsz)); \
113 } \
109 } \ 114 } \
110} while (0) 115} while (0)
111 116
diff --git a/arch/x86_64/kernel/e820.c b/arch/x86_64/kernel/e820.c
index e56c2adf57a..764bf23c710 100644
--- a/arch/x86_64/kernel/e820.c
+++ b/arch/x86_64/kernel/e820.c
@@ -71,7 +71,11 @@ static inline int bad_addr(unsigned long *addrp, unsigned long size)
71#endif 71#endif
72 /* kernel code + 640k memory hole (later should not be needed, but 72 /* kernel code + 640k memory hole (later should not be needed, but
73 be paranoid for now) */ 73 be paranoid for now) */
74 if (last >= 640*1024 && addr < __pa_symbol(&_end)) { 74 if (last >= 640*1024 && addr < 1024*1024) {
75 *addrp = 1024*1024;
76 return 1;
77 }
78 if (last >= __pa_symbol(&_text) && last < __pa_symbol(&_end)) {
75 *addrp = __pa_symbol(&_end); 79 *addrp = __pa_symbol(&_end);
76 return 1; 80 return 1;
77 } 81 }
@@ -104,35 +108,6 @@ e820_any_mapped(unsigned long start, unsigned long end, unsigned type)
104 return 0; 108 return 0;
105} 109}
106 110
107/*
108 * This function checks if the entire range <start,end> is mapped with type.
109 *
110 * Note: this function only works correct if the e820 table is sorted and
111 * not-overlapping, which is the case
112 */
113int __init e820_all_mapped(unsigned long start, unsigned long end, unsigned type)
114{
115 int i;
116 for (i = 0; i < e820.nr_map; i++) {
117 struct e820entry *ei = &e820.map[i];
118 if (type && ei->type != type)
119 continue;
120 /* is the region (part) in overlap with the current region ?*/
121 if (ei->addr >= end || ei->addr + ei->size <= start)
122 continue;
123
124 /* if the region is at the beginning of <start,end> we move
125 * start to the end of the region since it's ok until there
126 */
127 if (ei->addr <= start)
128 start = ei->addr + ei->size;
129 /* if start is now at or beyond end, we're done, full coverage */
130 if (start >= end)
131 return 1; /* we're done */
132 }
133 return 0;
134}
135
136/* 111/*
137 * Find a free area in a specific range. 112 * Find a free area in a specific range.
138 */ 113 */
diff --git a/arch/x86_64/kernel/entry.S b/arch/x86_64/kernel/entry.S
index 6f810424df4..aa8d8939abc 100644
--- a/arch/x86_64/kernel/entry.S
+++ b/arch/x86_64/kernel/entry.S
@@ -973,6 +973,8 @@ ENTRY(kernel_thread)
973ENDPROC(kernel_thread) 973ENDPROC(kernel_thread)
974 974
975child_rip: 975child_rip:
976 pushq $0 # fake return address
977 CFI_STARTPROC
976 /* 978 /*
977 * Here we are in the child and the registers are set as they were 979 * Here we are in the child and the registers are set as they were
978 * at kernel_thread() invocation in the parent. 980 * at kernel_thread() invocation in the parent.
@@ -983,6 +985,7 @@ child_rip:
983 # exit 985 # exit
984 xorl %edi, %edi 986 xorl %edi, %edi
985 call do_exit 987 call do_exit
988 CFI_ENDPROC
986ENDPROC(child_rip) 989ENDPROC(child_rip)
987 990
988/* 991/*
diff --git a/arch/x86_64/kernel/head.S b/arch/x86_64/kernel/head.S
index 6df05e6034f..c9739ca81d0 100644
--- a/arch/x86_64/kernel/head.S
+++ b/arch/x86_64/kernel/head.S
@@ -191,6 +191,7 @@ startup_64:
191 * jump 191 * jump
192 */ 192 */
193 movq initial_code(%rip),%rax 193 movq initial_code(%rip),%rax
194 pushq $0 # fake return address
194 jmp *%rax 195 jmp *%rax
195 196
196 /* SMP bootup changes these two */ 197 /* SMP bootup changes these two */
diff --git a/arch/x86_64/kernel/init_task.c b/arch/x86_64/kernel/init_task.c
index ce31d904d60..3dc5854ba21 100644
--- a/arch/x86_64/kernel/init_task.c
+++ b/arch/x86_64/kernel/init_task.c
@@ -46,4 +46,9 @@ EXPORT_SYMBOL(init_task);
46 */ 46 */
47DEFINE_PER_CPU(struct tss_struct, init_tss) ____cacheline_internodealigned_in_smp = INIT_TSS; 47DEFINE_PER_CPU(struct tss_struct, init_tss) ____cacheline_internodealigned_in_smp = INIT_TSS;
48 48
49/* Copies of the original ist values from the tss are only accessed during
50 * debugging, no special alignment required.
51 */
52DEFINE_PER_CPU(struct orig_ist, orig_ist);
53
49#define ALIGN_TO_4K __attribute__((section(".data.init_task"))) 54#define ALIGN_TO_4K __attribute__((section(".data.init_task")))
diff --git a/arch/x86_64/kernel/setup.c b/arch/x86_64/kernel/setup.c
index 8a099ff1f8b..34afad70482 100644
--- a/arch/x86_64/kernel/setup.c
+++ b/arch/x86_64/kernel/setup.c
@@ -521,8 +521,6 @@ static void discover_ebda(void)
521 521
522void __init setup_arch(char **cmdline_p) 522void __init setup_arch(char **cmdline_p)
523{ 523{
524 unsigned long kernel_end;
525
526 ROOT_DEV = old_decode_dev(ORIG_ROOT_DEV); 524 ROOT_DEV = old_decode_dev(ORIG_ROOT_DEV);
527 screen_info = SCREEN_INFO; 525 screen_info = SCREEN_INFO;
528 edid_info = EDID_INFO; 526 edid_info = EDID_INFO;
@@ -596,8 +594,8 @@ void __init setup_arch(char **cmdline_p)
596 (table_end - table_start) << PAGE_SHIFT); 594 (table_end - table_start) << PAGE_SHIFT);
597 595
598 /* reserve kernel */ 596 /* reserve kernel */
599 kernel_end = round_up(__pa_symbol(&_end),PAGE_SIZE); 597 reserve_bootmem_generic(__pa_symbol(&_text),
600 reserve_bootmem_generic(HIGH_MEMORY, kernel_end - HIGH_MEMORY); 598 __pa_symbol(&_end) - __pa_symbol(&_text));
601 599
602 /* 600 /*
603 * reserve physical page 0 - it's a special BIOS page on many boxes, 601 * reserve physical page 0 - it's a special BIOS page on many boxes,
diff --git a/arch/x86_64/kernel/setup64.c b/arch/x86_64/kernel/setup64.c
index 6fe58a634b5..417de564456 100644
--- a/arch/x86_64/kernel/setup64.c
+++ b/arch/x86_64/kernel/setup64.c
@@ -189,6 +189,7 @@ void __cpuinit cpu_init (void)
189{ 189{
190 int cpu = stack_smp_processor_id(); 190 int cpu = stack_smp_processor_id();
191 struct tss_struct *t = &per_cpu(init_tss, cpu); 191 struct tss_struct *t = &per_cpu(init_tss, cpu);
192 struct orig_ist *orig_ist = &per_cpu(orig_ist, cpu);
192 unsigned long v; 193 unsigned long v;
193 char *estacks = NULL; 194 char *estacks = NULL;
194 struct task_struct *me; 195 struct task_struct *me;
@@ -256,7 +257,7 @@ void __cpuinit cpu_init (void)
256 estacks += EXCEPTION_STKSZ; 257 estacks += EXCEPTION_STKSZ;
257 break; 258 break;
258 } 259 }
259 t->ist[v] = (unsigned long)estacks; 260 orig_ist->ist[v] = t->ist[v] = (unsigned long)estacks;
260 } 261 }
261 262
262 t->io_bitmap_base = offsetof(struct tss_struct, io_bitmap); 263 t->io_bitmap_base = offsetof(struct tss_struct, io_bitmap);
diff --git a/arch/x86_64/kernel/traps.c b/arch/x86_64/kernel/traps.c
index 4e9938dee06..b1249774d1e 100644
--- a/arch/x86_64/kernel/traps.c
+++ b/arch/x86_64/kernel/traps.c
@@ -107,7 +107,11 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
107} 107}
108 108
109static int kstack_depth_to_print = 12; 109static int kstack_depth_to_print = 12;
110#ifdef CONFIG_STACK_UNWIND
110static int call_trace = 1; 111static int call_trace = 1;
112#else
113#define call_trace (-1)
114#endif
111 115
112#ifdef CONFIG_KALLSYMS 116#ifdef CONFIG_KALLSYMS
113# include <linux/kallsyms.h> 117# include <linux/kallsyms.h>
@@ -174,7 +178,7 @@ static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack,
174 break; 178 break;
175#endif 179#endif
176 default: 180 default:
177 end = per_cpu(init_tss, cpu).ist[k]; 181 end = per_cpu(orig_ist, cpu).ist[k];
178 break; 182 break;
179 } 183 }
180 /* 184 /*
@@ -274,21 +278,21 @@ void show_trace(struct task_struct *tsk, struct pt_regs *regs, unsigned long * s
274 if (unwind_init_blocked(&info, tsk) == 0) 278 if (unwind_init_blocked(&info, tsk) == 0)
275 unw_ret = show_trace_unwind(&info, NULL); 279 unw_ret = show_trace_unwind(&info, NULL);
276 } 280 }
277 if (unw_ret > 0 && !arch_unw_user_mode(&info)) { 281 if (unw_ret > 0) {
278#ifdef CONFIG_STACK_UNWIND 282 if (call_trace == 1 && !arch_unw_user_mode(&info)) {
279 unsigned long rip = info.regs.rip; 283 print_symbol("DWARF2 unwinder stuck at %s\n",
280 print_symbol("DWARF2 unwinder stuck at %s\n", rip); 284 UNW_PC(&info));
281 if (call_trace == 1) { 285 if ((long)UNW_SP(&info) < 0) {
282 printk("Leftover inexact backtrace:\n"); 286 printk("Leftover inexact backtrace:\n");
283 stack = (unsigned long *)info.regs.rsp; 287 stack = (unsigned long *)UNW_SP(&info);
284 } else if (call_trace > 1) 288 } else
289 printk("Full inexact backtrace again:\n");
290 } else if (call_trace >= 1)
285 return; 291 return;
286 else 292 else
287 printk("Full inexact backtrace again:\n"); 293 printk("Full inexact backtrace again:\n");
288#else 294 } else
289 printk("Inexact backtrace:\n"); 295 printk("Inexact backtrace:\n");
290#endif
291 }
292 } 296 }
293 297
294 /* 298 /*
@@ -529,7 +533,7 @@ void __kprobes oops_end(unsigned long flags)
529 /* Nest count reaches zero, release the lock. */ 533 /* Nest count reaches zero, release the lock. */
530 spin_unlock_irqrestore(&die_lock, flags); 534 spin_unlock_irqrestore(&die_lock, flags);
531 if (panic_on_oops) 535 if (panic_on_oops)
532 panic("Fatal exception: panic_on_oops"); 536 panic("Fatal exception");
533} 537}
534 538
535void __kprobes __die(const char * str, struct pt_regs * regs, long err) 539void __kprobes __die(const char * str, struct pt_regs * regs, long err)
@@ -1120,6 +1124,7 @@ static int __init kstack_setup(char *s)
1120} 1124}
1121__setup("kstack=", kstack_setup); 1125__setup("kstack=", kstack_setup);
1122 1126
1127#ifdef CONFIG_STACK_UNWIND
1123static int __init call_trace_setup(char *s) 1128static int __init call_trace_setup(char *s)
1124{ 1129{
1125 if (strcmp(s, "old") == 0) 1130 if (strcmp(s, "old") == 0)
@@ -1133,3 +1138,4 @@ static int __init call_trace_setup(char *s)
1133 return 1; 1138 return 1;
1134} 1139}
1135__setup("call_trace=", call_trace_setup); 1140__setup("call_trace=", call_trace_setup);
1141#endif
diff --git a/arch/x86_64/pci/mmconfig.c b/arch/x86_64/pci/mmconfig.c
index 3c55c76c6fd..2d48a7941d4 100644
--- a/arch/x86_64/pci/mmconfig.c
+++ b/arch/x86_64/pci/mmconfig.c
@@ -9,6 +9,7 @@
9#include <linux/init.h> 9#include <linux/init.h>
10#include <linux/acpi.h> 10#include <linux/acpi.h>
11#include <linux/bitmap.h> 11#include <linux/bitmap.h>
12#include <linux/dmi.h>
12#include <asm/e820.h> 13#include <asm/e820.h>
13 14
14#include "pci.h" 15#include "pci.h"
@@ -164,11 +165,33 @@ static __init void unreachable_devices(void)
164 } 165 }
165} 166}
166 167
168static int disable_mcfg(struct dmi_system_id *d)
169{
170 printk("PCI: %s detected. Disabling MCFG.\n", d->ident);
171 pci_probe &= ~PCI_PROBE_MMCONF;
172 return 0;
173}
174
175static struct dmi_system_id __initdata dmi_bad_mcfg[] = {
176 /* Has broken MCFG table that makes the system hang when used */
177 {
178 .callback = disable_mcfg,
179 .ident = "Intel D3C5105 SDV",
180 .matches = {
181 DMI_MATCH(DMI_BIOS_VENDOR, "Intel"),
182 DMI_MATCH(DMI_BOARD_NAME, "D26928"),
183 },
184 },
185 {}
186};
187
167void __init pci_mmcfg_init(void) 188void __init pci_mmcfg_init(void)
168{ 189{
169 int i; 190 int i;
170 191
171 if ((pci_probe & PCI_PROBE_MMCONF) == 0) 192 dmi_check_system(dmi_bad_mcfg);
193
194 if ((pci_probe & (PCI_PROBE_MMCONF|PCI_PROBE_MMCONF_FORCE)) == 0)
172 return; 195 return;
173 196
174 acpi_table_parse(ACPI_MCFG, acpi_parse_mcfg); 197 acpi_table_parse(ACPI_MCFG, acpi_parse_mcfg);
@@ -177,15 +200,6 @@ void __init pci_mmcfg_init(void)
177 (pci_mmcfg_config[0].base_address == 0)) 200 (pci_mmcfg_config[0].base_address == 0))
178 return; 201 return;
179 202
180 if (!e820_all_mapped(pci_mmcfg_config[0].base_address,
181 pci_mmcfg_config[0].base_address + MMCONFIG_APER_MIN,
182 E820_RESERVED)) {
183 printk(KERN_ERR "PCI: BIOS Bug: MCFG area at %x is not E820-reserved\n",
184 pci_mmcfg_config[0].base_address);
185 printk(KERN_ERR "PCI: Not using MMCONFIG.\n");
186 return;
187 }
188
189 /* RED-PEN i386 doesn't do _nocache right now */ 203 /* RED-PEN i386 doesn't do _nocache right now */
190 pci_mmcfg_virt = kmalloc(sizeof(*pci_mmcfg_virt) * pci_mmcfg_config_num, GFP_KERNEL); 204 pci_mmcfg_virt = kmalloc(sizeof(*pci_mmcfg_virt) * pci_mmcfg_config_num, GFP_KERNEL);
191 if (pci_mmcfg_virt == NULL) { 205 if (pci_mmcfg_virt == NULL) {
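
The mmconfig.c hunk above replaces the E820 sanity check with a DMI blacklist: disable_mcfg() clears PCI_PROBE_MMCONF for boards listed in dmi_bad_mcfg[] before pci_mmcfg_init() decides whether to map the MCFG aperture. Below is a minimal userspace sketch of the same match-table-plus-callback idea; the struct layout, check_quirks() helper, and PROBE_MMCONF flag are invented for illustration and are not the kernel's dmi_check_system() API.

#include <stdio.h>
#include <string.h>

/* Hypothetical stand-ins for the DMI strings a firmware table would expose. */
struct sys_id {
    const char *bios_vendor;
    const char *board_name;
};

struct quirk {
    const char *ident;                 /* human-readable system name            */
    struct sys_id match;               /* substrings that must all be present   */
    int (*callback)(const struct quirk *q, unsigned int *probe_flags);
};

#define PROBE_MMCONF 0x1

static int disable_mmconf(const struct quirk *q, unsigned int *probe_flags)
{
    printf("PCI: %s detected. Disabling MCFG.\n", q->ident);
    *probe_flags &= ~PROBE_MMCONF;     /* same effect as pci_probe &= ~PCI_PROBE_MMCONF */
    return 0;
}

static const struct quirk quirks[] = {
    { "Intel D3C5105 SDV", { "Intel", "D26928" }, disable_mmconf },
    { NULL, { NULL, NULL }, NULL }     /* terminator, like the empty {} entry */
};

/* Walk the table and fire the callback of every entry whose fields match. */
static void check_quirks(const struct sys_id *sys, unsigned int *probe_flags)
{
    const struct quirk *q;

    for (q = quirks; q->ident; q++)
        if (strstr(sys->bios_vendor, q->match.bios_vendor) &&
            strstr(sys->board_name, q->match.board_name))
            q->callback(q, probe_flags);
}

int main(void)
{
    struct sys_id sys = { "Intel Corp.", "D26928" };
    unsigned int probe_flags = PROBE_MMCONF;

    check_quirks(&sys, &probe_flags);
    printf("mmconf %s\n", (probe_flags & PROBE_MMCONF) ? "enabled" : "disabled");
    return 0;
}
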
diff --git a/arch/xtensa/kernel/traps.c b/arch/xtensa/kernel/traps.c
index 9734960a245..ce077d6bf3a 100644
--- a/arch/xtensa/kernel/traps.c
+++ b/arch/xtensa/kernel/traps.c
@@ -488,7 +488,7 @@ void die(const char * str, struct pt_regs * regs, long err)
488 panic("Fatal exception in interrupt"); 488 panic("Fatal exception in interrupt");
489 489
490 if (panic_on_oops) 490 if (panic_on_oops)
491 panic("Fatal exception: panic_on_oops"); 491 panic("Fatal exception");
492 492
493 do_exit(err); 493 do_exit(err);
494} 494}
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index aae3123bf3e..3a3aee08ec5 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -1561,7 +1561,7 @@ restart:
1561 /* ->key must be copied to avoid race with cfq_exit_queue() */ 1561 /* ->key must be copied to avoid race with cfq_exit_queue() */
1562 k = __cic->key; 1562 k = __cic->key;
1563 if (unlikely(!k)) { 1563 if (unlikely(!k)) {
1564 cfq_drop_dead_cic(ioc, cic); 1564 cfq_drop_dead_cic(ioc, __cic);
1565 goto restart; 1565 goto restart;
1566 } 1566 }
1567 1567
diff --git a/block/elevator.c b/block/elevator.c
index bc7baeec0d1..9b72dc7c8a5 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -765,7 +765,8 @@ void elv_unregister(struct elevator_type *e)
765 read_lock(&tasklist_lock); 765 read_lock(&tasklist_lock);
766 do_each_thread(g, p) { 766 do_each_thread(g, p) {
767 task_lock(p); 767 task_lock(p);
768 e->ops.trim(p->io_context); 768 if (p->io_context)
769 e->ops.trim(p->io_context);
769 task_unlock(p); 770 task_unlock(p);
770 } while_each_thread(g, p); 771 } while_each_thread(g, p);
771 read_unlock(&tasklist_lock); 772 read_unlock(&tasklist_lock);
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 61d6b3c65b6..ddd9253f9d5 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -3628,6 +3628,8 @@ struct io_context *current_io_context(gfp_t gfp_flags)
3628 ret->nr_batch_requests = 0; /* because this is 0 */ 3628 ret->nr_batch_requests = 0; /* because this is 0 */
3629 ret->aic = NULL; 3629 ret->aic = NULL;
3630 ret->cic_root.rb_node = NULL; 3630 ret->cic_root.rb_node = NULL;
3631 /* make sure set_task_ioprio() sees the settings above */
3632 smp_wmb();
3631 tsk->io_context = ret; 3633 tsk->io_context = ret;
3632 } 3634 }
3633 3635
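
The smp_wmb() added here orders the initialisation of the new io_context before the store that publishes it through tsk->io_context, so a concurrent set_task_ioprio() cannot see the pointer ahead of the fields behind it. A hedged userspace analogue using C11 release/acquire ordering follows; the struct and function names are made up, and the kernel primitive is smp_wmb() rather than a release store.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct io_ctx {
    int nr_batch_requests;
    void *aic;
};

/* Published pointer; NULL until the structure is fully initialised. */
static _Atomic(struct io_ctx *) task_io_ctx;

static void publish_ctx(void)
{
    struct io_ctx *ctx = malloc(sizeof(*ctx));

    if (!ctx)
        return;
    ctx->nr_batch_requests = 0;
    ctx->aic = NULL;
    /* Analogue of smp_wmb(): order the stores above before the pointer store. */
    atomic_store_explicit(&task_io_ctx, ctx, memory_order_release);
}

static void reader(void)
{
    /* Analogue of the reader side pairing with the barrier. */
    struct io_ctx *ctx = atomic_load_explicit(&task_io_ctx, memory_order_acquire);

    if (ctx)
        printf("saw fully initialised context: %d\n", ctx->nr_batch_requests);
}

int main(void)
{
    publish_ctx();
    reader();
    return 0;
}
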
diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c
index 96309b9660d..11abc7bf777 100644
--- a/drivers/acpi/ac.c
+++ b/drivers/acpi/ac.c
@@ -285,6 +285,8 @@ static int __init acpi_ac_init(void)
285{ 285{
286 int result; 286 int result;
287 287
288 if (acpi_disabled)
289 return -ENODEV;
288 290
289 acpi_ac_dir = acpi_lock_ac_dir(); 291 acpi_ac_dir = acpi_lock_ac_dir();
290 if (!acpi_ac_dir) 292 if (!acpi_ac_dir)
diff --git a/drivers/acpi/acpi_memhotplug.c b/drivers/acpi/acpi_memhotplug.c
index b0d4b147b19..1dda370f402 100644
--- a/drivers/acpi/acpi_memhotplug.c
+++ b/drivers/acpi/acpi_memhotplug.c
@@ -484,10 +484,8 @@ acpi_memory_register_notify_handler(acpi_handle handle,
484 484
485 485
486 status = is_memory_device(handle); 486 status = is_memory_device(handle);
487 if (ACPI_FAILURE(status)){ 487 if (ACPI_FAILURE(status))
488 ACPI_EXCEPTION((AE_INFO, status, "handle is no memory device"));
489 return AE_OK; /* continue */ 488 return AE_OK; /* continue */
490 }
491 489
492 status = acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY, 490 status = acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY,
493 acpi_memory_device_notify, NULL); 491 acpi_memory_device_notify, NULL);
@@ -503,10 +501,8 @@ acpi_memory_deregister_notify_handler(acpi_handle handle,
503 501
504 502
505 status = is_memory_device(handle); 503 status = is_memory_device(handle);
506 if (ACPI_FAILURE(status)){ 504 if (ACPI_FAILURE(status))
507 ACPI_EXCEPTION((AE_INFO, status, "handle is no memory device"));
508 return AE_OK; /* continue */ 505 return AE_OK; /* continue */
509 }
510 506
511 status = acpi_remove_notify_handler(handle, 507 status = acpi_remove_notify_handler(handle,
512 ACPI_SYSTEM_NOTIFY, 508 ACPI_SYSTEM_NOTIFY,
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 6e5221707d9..9810e2a55d0 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -757,6 +757,9 @@ static int __init acpi_battery_init(void)
757{ 757{
758 int result; 758 int result;
759 759
760 if (acpi_disabled)
761 return -ENODEV;
762
760 acpi_battery_dir = acpi_lock_battery_dir(); 763 acpi_battery_dir = acpi_lock_battery_dir();
761 if (!acpi_battery_dir) 764 if (!acpi_battery_dir)
762 return -ENODEV; 765 return -ENODEV;
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index b2977695e12..279c4bac92e 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -25,6 +25,7 @@
25#include <linux/module.h> 25#include <linux/module.h>
26#include <linux/init.h> 26#include <linux/init.h>
27#include <linux/ioport.h> 27#include <linux/ioport.h>
28#include <linux/kernel.h>
28#include <linux/list.h> 29#include <linux/list.h>
29#include <linux/sched.h> 30#include <linux/sched.h>
30#include <linux/pm.h> 31#include <linux/pm.h>
@@ -68,7 +69,8 @@ int acpi_bus_get_device(acpi_handle handle, struct acpi_device **device)
68 69
69 status = acpi_get_data(handle, acpi_bus_data_handler, (void **)device); 70 status = acpi_get_data(handle, acpi_bus_data_handler, (void **)device);
70 if (ACPI_FAILURE(status) || !*device) { 71 if (ACPI_FAILURE(status) || !*device) {
71 ACPI_EXCEPTION((AE_INFO, status, "No context for object [%p]", handle)); 72 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No context for object [%p]\n",
73 handle));
72 return -ENODEV; 74 return -ENODEV;
73 } 75 }
74 76
@@ -192,7 +194,7 @@ int acpi_bus_set_power(acpi_handle handle, int state)
192 /* Make sure this is a valid target state */ 194 /* Make sure this is a valid target state */
193 195
194 if (!device->flags.power_manageable) { 196 if (!device->flags.power_manageable) {
195 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device `[%s]' is not power manageable", 197 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device `[%s]' is not power manageable\n",
196 device->kobj.name)); 198 device->kobj.name));
197 return -ENODEV; 199 return -ENODEV;
198 } 200 }
@@ -738,7 +740,10 @@ static int __init acpi_init(void)
738 return -ENODEV; 740 return -ENODEV;
739 } 741 }
740 742
741 firmware_register(&acpi_subsys); 743 result = firmware_register(&acpi_subsys);
744 if (result < 0)
745 printk(KERN_WARNING "%s: firmware_register error: %d\n",
746 __FUNCTION__, result);
742 747
743 result = acpi_bus_init(); 748 result = acpi_bus_init();
744 749
diff --git a/drivers/acpi/hotkey.c b/drivers/acpi/hotkey.c
index 32c9d88fd19..1ba2db67186 100644
--- a/drivers/acpi/hotkey.c
+++ b/drivers/acpi/hotkey.c
@@ -91,6 +91,14 @@ enum {
91 HK_EVENT_ENTERRING_S5, 91 HK_EVENT_ENTERRING_S5,
92}; 92};
93 93
94enum conf_entry_enum {
95 bus_handle = 0,
96 bus_method = 1,
97 action_handle = 2,
98 method = 3,
99 LAST_CONF_ENTRY
100};
101
94/* procdir we use */ 102/* procdir we use */
95static struct proc_dir_entry *hotkey_proc_dir; 103static struct proc_dir_entry *hotkey_proc_dir;
96static struct proc_dir_entry *hotkey_config; 104static struct proc_dir_entry *hotkey_config;
@@ -244,19 +252,15 @@ static int hotkey_info_open_fs(struct inode *inode, struct file *file)
244 252
245static char *format_result(union acpi_object *object) 253static char *format_result(union acpi_object *object)
246{ 254{
247 char *buf = NULL; 255 char *buf;
248
249 buf = (char *)kmalloc(RESULT_STR_LEN, GFP_KERNEL);
250 if (buf)
251 memset(buf, 0, RESULT_STR_LEN);
252 else
253 goto do_fail;
254 256
257 buf = kzalloc(RESULT_STR_LEN, GFP_KERNEL);
258 if (!buf)
259 return NULL;
255 /* Now, just support integer type */ 260 /* Now, just support integer type */
256 if (object->type == ACPI_TYPE_INTEGER) 261 if (object->type == ACPI_TYPE_INTEGER)
257 sprintf(buf, "%d\n", (u32) object->integer.value); 262 sprintf(buf, "%d\n", (u32) object->integer.value);
258 do_fail: 263 return buf;
259 return (buf);
260} 264}
261 265
262static int hotkey_polling_seq_show(struct seq_file *seq, void *offset) 266static int hotkey_polling_seq_show(struct seq_file *seq, void *offset)
@@ -486,98 +490,102 @@ static void free_hotkey_device(union acpi_hotkey *key)
486 490
487static void free_hotkey_buffer(union acpi_hotkey *key) 491static void free_hotkey_buffer(union acpi_hotkey *key)
488{ 492{
493 /* key would never be null, action method could be */
489 kfree(key->event_hotkey.action_method); 494 kfree(key->event_hotkey.action_method);
490} 495}
491 496
492static void free_poll_hotkey_buffer(union acpi_hotkey *key) 497static void free_poll_hotkey_buffer(union acpi_hotkey *key)
493{ 498{
499 /* key would never be null, others could be*/
494 kfree(key->poll_hotkey.action_method); 500 kfree(key->poll_hotkey.action_method);
495 kfree(key->poll_hotkey.poll_method); 501 kfree(key->poll_hotkey.poll_method);
496 kfree(key->poll_hotkey.poll_result); 502 kfree(key->poll_hotkey.poll_result);
497} 503}
498static int 504static int
499init_hotkey_device(union acpi_hotkey *key, char *bus_str, char *action_str, 505init_hotkey_device(union acpi_hotkey *key, char **config_entry,
500 char *method, int std_num, int external_num) 506 int std_num, int external_num)
501{ 507{
502 acpi_handle tmp_handle; 508 acpi_handle tmp_handle;
503 acpi_status status = AE_OK; 509 acpi_status status = AE_OK;
504 510
505
506 if (std_num < 0 || IS_POLL(std_num) || !key) 511 if (std_num < 0 || IS_POLL(std_num) || !key)
507 goto do_fail; 512 goto do_fail;
508 513
509 if (!bus_str || !action_str || !method) 514 if (!config_entry[bus_handle] || !config_entry[action_handle]
515 || !config_entry[method])
510 goto do_fail; 516 goto do_fail;
511 517
512 key->link.hotkey_type = ACPI_HOTKEY_EVENT; 518 key->link.hotkey_type = ACPI_HOTKEY_EVENT;
513 key->link.hotkey_standard_num = std_num; 519 key->link.hotkey_standard_num = std_num;
514 key->event_hotkey.flag = 0; 520 key->event_hotkey.flag = 0;
515 key->event_hotkey.action_method = method; 521 key->event_hotkey.action_method = config_entry[method];
516 522
517 status = 523 status = acpi_get_handle(NULL, config_entry[bus_handle],
518 acpi_get_handle(NULL, bus_str, &(key->event_hotkey.bus_handle)); 524 &(key->event_hotkey.bus_handle));
519 if (ACPI_FAILURE(status)) 525 if (ACPI_FAILURE(status))
520 goto do_fail; 526 goto do_fail_zero;
521 key->event_hotkey.external_hotkey_num = external_num; 527 key->event_hotkey.external_hotkey_num = external_num;
522 status = 528 status = acpi_get_handle(NULL, config_entry[action_handle],
523 acpi_get_handle(NULL, action_str,
524 &(key->event_hotkey.action_handle)); 529 &(key->event_hotkey.action_handle));
525 if (ACPI_FAILURE(status)) 530 if (ACPI_FAILURE(status))
526 goto do_fail; 531 goto do_fail_zero;
527 status = acpi_get_handle(key->event_hotkey.action_handle, 532 status = acpi_get_handle(key->event_hotkey.action_handle,
528 method, &tmp_handle); 533 config_entry[method], &tmp_handle);
529 if (ACPI_FAILURE(status)) 534 if (ACPI_FAILURE(status))
530 goto do_fail; 535 goto do_fail_zero;
531 return AE_OK; 536 return AE_OK;
532 do_fail: 537do_fail_zero:
538 key->event_hotkey.action_method = NULL;
539do_fail:
533 return -ENODEV; 540 return -ENODEV;
534} 541}
535 542
536static int 543static int
537init_poll_hotkey_device(union acpi_hotkey *key, 544init_poll_hotkey_device(union acpi_hotkey *key, char **config_entry,
538 char *poll_str, 545 int std_num)
539 char *poll_method,
540 char *action_str, char *action_method, int std_num)
541{ 546{
542 acpi_status status = AE_OK; 547 acpi_status status = AE_OK;
543 acpi_handle tmp_handle; 548 acpi_handle tmp_handle;
544 549
545
546 if (std_num < 0 || IS_EVENT(std_num) || !key) 550 if (std_num < 0 || IS_EVENT(std_num) || !key)
547 goto do_fail; 551 goto do_fail;
548 552 if (!config_entry[bus_handle] ||!config_entry[bus_method] ||
549 if (!poll_str || !poll_method || !action_str || !action_method) 553 !config_entry[action_handle] || !config_entry[method])
550 goto do_fail; 554 goto do_fail;
551 555
552 key->link.hotkey_type = ACPI_HOTKEY_POLLING; 556 key->link.hotkey_type = ACPI_HOTKEY_POLLING;
553 key->link.hotkey_standard_num = std_num; 557 key->link.hotkey_standard_num = std_num;
554 key->poll_hotkey.flag = 0; 558 key->poll_hotkey.flag = 0;
555 key->poll_hotkey.poll_method = poll_method; 559 key->poll_hotkey.poll_method = config_entry[bus_method];
556 key->poll_hotkey.action_method = action_method; 560 key->poll_hotkey.action_method = config_entry[method];
557 561
558 status = 562 status = acpi_get_handle(NULL, config_entry[bus_handle],
559 acpi_get_handle(NULL, poll_str, &(key->poll_hotkey.poll_handle)); 563 &(key->poll_hotkey.poll_handle));
560 if (ACPI_FAILURE(status)) 564 if (ACPI_FAILURE(status))
561 goto do_fail; 565 goto do_fail_zero;
562 status = acpi_get_handle(key->poll_hotkey.poll_handle, 566 status = acpi_get_handle(key->poll_hotkey.poll_handle,
563 poll_method, &tmp_handle); 567 config_entry[bus_method], &tmp_handle);
564 if (ACPI_FAILURE(status)) 568 if (ACPI_FAILURE(status))
565 goto do_fail; 569 goto do_fail_zero;
566 status = 570 status =
567 acpi_get_handle(NULL, action_str, 571 acpi_get_handle(NULL, config_entry[action_handle],
568 &(key->poll_hotkey.action_handle)); 572 &(key->poll_hotkey.action_handle));
569 if (ACPI_FAILURE(status)) 573 if (ACPI_FAILURE(status))
570 goto do_fail; 574 goto do_fail_zero;
571 status = acpi_get_handle(key->poll_hotkey.action_handle, 575 status = acpi_get_handle(key->poll_hotkey.action_handle,
572 action_method, &tmp_handle); 576 config_entry[method], &tmp_handle);
573 if (ACPI_FAILURE(status)) 577 if (ACPI_FAILURE(status))
574 goto do_fail; 578 goto do_fail_zero;
575 key->poll_hotkey.poll_result = 579 key->poll_hotkey.poll_result =
576 (union acpi_object *)kmalloc(sizeof(union acpi_object), GFP_KERNEL); 580 (union acpi_object *)kmalloc(sizeof(union acpi_object), GFP_KERNEL);
577 if (!key->poll_hotkey.poll_result) 581 if (!key->poll_hotkey.poll_result)
578 goto do_fail; 582 goto do_fail_zero;
579 return AE_OK; 583 return AE_OK;
580 do_fail: 584
585do_fail_zero:
586 key->poll_hotkey.poll_method = NULL;
587 key->poll_hotkey.action_method = NULL;
588do_fail:
581 return -ENODEV; 589 return -ENODEV;
582} 590}
583 591
@@ -652,17 +660,18 @@ static int hotkey_poll_config_seq_show(struct seq_file *seq, void *offset)
652} 660}
653 661
654static int 662static int
655get_parms(char *config_record, 663get_parms(char *config_record, int *cmd, char **config_entry,
656 int *cmd, 664 int *internal_event_num, int *external_event_num)
657 char **bus_handle,
658 char **bus_method,
659 char **action_handle,
660 char **method, int *internal_event_num, int *external_event_num)
661{ 665{
666/* the format of *config_record =
667 * "1:\d+:*" : "cmd:internal_event_num"
668 * "\d+:\w+:\w+:\w+:\w+:\d+:\d+" :
669 * "cmd:bus_handle:bus_method:action_handle:method:internal_event_num:external_event_num"
670 */
662 char *tmp, *tmp1, count; 671 char *tmp, *tmp1, count;
672 int i;
663 673
664 sscanf(config_record, "%d", cmd); 674 sscanf(config_record, "%d", cmd);
665
666 if (*cmd == 1) { 675 if (*cmd == 1) {
667 if (sscanf(config_record, "%d:%d", cmd, internal_event_num) != 676 if (sscanf(config_record, "%d:%d", cmd, internal_event_num) !=
668 2) 677 2)
@@ -674,59 +683,27 @@ get_parms(char *config_record,
674 if (!tmp) 683 if (!tmp)
675 goto do_fail; 684 goto do_fail;
676 tmp++; 685 tmp++;
677 tmp1 = strchr(tmp, ':'); 686 for (i = 0; i < LAST_CONF_ENTRY; i++) {
678 if (!tmp1) 687 tmp1 = strchr(tmp, ':');
679 goto do_fail; 688 if (!tmp1) {
680 689 goto do_fail;
681 count = tmp1 - tmp; 690 }
682 *bus_handle = (char *)kmalloc(count + 1, GFP_KERNEL); 691 count = tmp1 - tmp;
683 if (!*bus_handle) 692 config_entry[i] = kzalloc(count + 1, GFP_KERNEL);
684 goto do_fail; 693 if (!config_entry[i])
685 strncpy(*bus_handle, tmp, count); 694 goto handle_failure;
686 *(*bus_handle + count) = 0; 695 strncpy(config_entry[i], tmp, count);
687 696 tmp = tmp1 + 1;
688 tmp = tmp1; 697 }
689 tmp++; 698 if (sscanf(tmp, "%d:%d", internal_event_num, external_event_num) <= 0)
690 tmp1 = strchr(tmp, ':'); 699 goto handle_failure;
691 if (!tmp1) 700 if (!IS_OTHERS(*internal_event_num)) {
692 goto do_fail; 701 return 6;
693 count = tmp1 - tmp; 702 }
694 *bus_method = (char *)kmalloc(count + 1, GFP_KERNEL); 703handle_failure:
695 if (!*bus_method) 704 while (i-- > 0)
696 goto do_fail; 705 kfree(config_entry[i]);
697 strncpy(*bus_method, tmp, count); 706do_fail:
698 *(*bus_method + count) = 0;
699
700 tmp = tmp1;
701 tmp++;
702 tmp1 = strchr(tmp, ':');
703 if (!tmp1)
704 goto do_fail;
705 count = tmp1 - tmp;
706 *action_handle = (char *)kmalloc(count + 1, GFP_KERNEL);
707 if (!*action_handle)
708 goto do_fail;
709 strncpy(*action_handle, tmp, count);
710 *(*action_handle + count) = 0;
711
712 tmp = tmp1;
713 tmp++;
714 tmp1 = strchr(tmp, ':');
715 if (!tmp1)
716 goto do_fail;
717 count = tmp1 - tmp;
718 *method = (char *)kmalloc(count + 1, GFP_KERNEL);
719 if (!*method)
720 goto do_fail;
721 strncpy(*method, tmp, count);
722 *(*method + count) = 0;
723
724 if (sscanf(tmp1 + 1, "%d:%d", internal_event_num, external_event_num) <=
725 0)
726 goto do_fail;
727
728 return 6;
729 do_fail:
730 return -1; 707 return -1;
731} 708}
732 709
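
The rewritten get_parms() walks the colon-separated record once, kzalloc()s each field into config_entry[], and frees whatever was already allocated if a later field is missing. Here is a small userspace sketch of that parse-and-unwind pattern, assuming the field order given in the format comment; the helper name and sample record are illustrative only.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

enum conf_entry { BUS_HANDLE, BUS_METHOD, ACTION_HANDLE, METHOD, LAST_CONF_ENTRY };

/* Copy the LAST_CONF_ENTRY colon-separated fields after the leading "cmd:".
 * Returns 0 on success; on failure every field allocated so far is freed. */
static int parse_fields(const char *record, char *entry[LAST_CONF_ENTRY])
{
    const char *tmp = strchr(record, ':');
    int i;

    if (!tmp)
        return -1;
    tmp++;                                   /* skip "cmd:" */

    for (i = 0; i < LAST_CONF_ENTRY; i++) {
        const char *tmp1 = strchr(tmp, ':');
        size_t count;

        if (!tmp1)
            goto fail;
        count = (size_t)(tmp1 - tmp);
        entry[i] = calloc(count + 1, 1);     /* userspace stand-in for kzalloc() */
        if (!entry[i])
            goto fail;
        memcpy(entry[i], tmp, count);
        tmp = tmp1 + 1;
    }
    return 0;

fail:
    while (i-- > 0)                          /* unwind partial allocations */
        free(entry[i]);
    return -1;
}

int main(void)
{
    char *entry[LAST_CONF_ENTRY] = { 0 };
    int i;

    if (parse_fields("3:\\_SB.PCI0:BMTH:\\_SB.ACT:AMTH:4:5", entry) == 0) {
        for (i = 0; i < LAST_CONF_ENTRY; i++) {
            printf("field %d = %s\n", i, entry[i]);
            free(entry[i]);
        }
    }
    return 0;
}
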
@@ -736,50 +713,34 @@ static ssize_t hotkey_write_config(struct file *file,
736 size_t count, loff_t * data) 713 size_t count, loff_t * data)
737{ 714{
738 char *config_record = NULL; 715 char *config_record = NULL;
739 char *bus_handle = NULL; 716 char *config_entry[LAST_CONF_ENTRY];
740 char *bus_method = NULL;
741 char *action_handle = NULL;
742 char *method = NULL;
743 int cmd, internal_event_num, external_event_num; 717 int cmd, internal_event_num, external_event_num;
744 int ret = 0; 718 int ret = 0;
745 union acpi_hotkey *key = NULL; 719 union acpi_hotkey *key = kzalloc(sizeof(union acpi_hotkey), GFP_KERNEL);
746 720
721 if (!key)
722 return -ENOMEM;
747 723
748 config_record = (char *)kmalloc(count + 1, GFP_KERNEL); 724 config_record = kzalloc(count + 1, GFP_KERNEL);
749 if (!config_record) 725 if (!config_record) {
726 kfree(key);
750 return -ENOMEM; 727 return -ENOMEM;
728 }
751 729
752 if (copy_from_user(config_record, buffer, count)) { 730 if (copy_from_user(config_record, buffer, count)) {
753 kfree(config_record); 731 kfree(config_record);
732 kfree(key);
754 printk(KERN_ERR PREFIX "Invalid data\n"); 733 printk(KERN_ERR PREFIX "Invalid data\n");
755 return -EINVAL; 734 return -EINVAL;
756 } 735 }
757 config_record[count] = 0; 736 ret = get_parms(config_record, &cmd, config_entry,
758 737 &internal_event_num, &external_event_num);
759 ret = get_parms(config_record,
760 &cmd,
761 &bus_handle,
762 &bus_method,
763 &action_handle,
764 &method, &internal_event_num, &external_event_num);
765
766 kfree(config_record); 738 kfree(config_record);
767 if (IS_OTHERS(internal_event_num))
768 goto do_fail;
769 if (ret != 6) { 739 if (ret != 6) {
770 do_fail:
771 kfree(bus_handle);
772 kfree(bus_method);
773 kfree(action_handle);
774 kfree(method);
775 printk(KERN_ERR PREFIX "Invalid data format ret=%d\n", ret); 740 printk(KERN_ERR PREFIX "Invalid data format ret=%d\n", ret);
776 return -EINVAL; 741 return -EINVAL;
777 } 742 }
778 743
779 key = kmalloc(sizeof(union acpi_hotkey), GFP_KERNEL);
780 if (!key)
781 goto do_fail;
782 memset(key, 0, sizeof(union acpi_hotkey));
783 if (cmd == 1) { 744 if (cmd == 1) {
784 union acpi_hotkey *tmp = NULL; 745 union acpi_hotkey *tmp = NULL;
785 tmp = get_hotkey_by_event(&global_hotkey_list, 746 tmp = get_hotkey_by_event(&global_hotkey_list,
@@ -791,34 +752,19 @@ static ssize_t hotkey_write_config(struct file *file,
791 goto cont_cmd; 752 goto cont_cmd;
792 } 753 }
793 if (IS_EVENT(internal_event_num)) { 754 if (IS_EVENT(internal_event_num)) {
794 kfree(bus_method); 755 if (init_hotkey_device(key, config_entry,
795 ret = init_hotkey_device(key, bus_handle, action_handle, method, 756 internal_event_num, external_event_num))
796 internal_event_num, 757 goto init_hotkey_fail;
797 external_event_num); 758 } else {
798 } else 759 if (init_poll_hotkey_device(key, config_entry,
799 ret = init_poll_hotkey_device(key, bus_handle, bus_method, 760 internal_event_num))
800 action_handle, method, 761 goto init_poll_hotkey_fail;
801 internal_event_num);
802 if (ret) {
803 kfree(bus_handle);
804 kfree(action_handle);
805 if (IS_EVENT(internal_event_num))
806 free_hotkey_buffer(key);
807 else
808 free_poll_hotkey_buffer(key);
809 kfree(key);
810 printk(KERN_ERR PREFIX "Invalid hotkey\n");
811 return -EINVAL;
812 } 762 }
813 763cont_cmd:
814 cont_cmd:
815 kfree(bus_handle);
816 kfree(action_handle);
817
818 switch (cmd) { 764 switch (cmd) {
819 case 0: 765 case 0:
820 if (get_hotkey_by_event 766 if (get_hotkey_by_event(&global_hotkey_list,
821 (&global_hotkey_list, key->link.hotkey_standard_num)) 767 key->link.hotkey_standard_num))
822 goto fail_out; 768 goto fail_out;
823 else 769 else
824 hotkey_add(key); 770 hotkey_add(key);
@@ -827,6 +773,7 @@ static ssize_t hotkey_write_config(struct file *file,
827 hotkey_remove(key); 773 hotkey_remove(key);
828 break; 774 break;
829 case 2: 775 case 2:
776 /* key is kfree()ed if matched*/
830 if (hotkey_update(key)) 777 if (hotkey_update(key))
831 goto fail_out; 778 goto fail_out;
832 break; 779 break;
@@ -835,11 +782,22 @@ static ssize_t hotkey_write_config(struct file *file,
835 break; 782 break;
836 } 783 }
837 return count; 784 return count;
838 fail_out: 785
839 if (IS_EVENT(internal_event_num)) 786init_poll_hotkey_fail: /* failed init_poll_hotkey_device */
840 free_hotkey_buffer(key); 787 kfree(config_entry[bus_method]);
841 else 788 config_entry[bus_method] = NULL;
842 free_poll_hotkey_buffer(key); 789init_hotkey_fail: /* failed init_hotkey_device */
790 kfree(config_entry[method]);
791fail_out:
792 kfree(config_entry[bus_handle]);
793 kfree(config_entry[action_handle]);
794 /* No double free since elements =NULL for error cases */
795 if (IS_EVENT(internal_event_num)) {
796 if (config_entry[bus_method])
797 kfree(config_entry[bus_method]);
798 free_hotkey_buffer(key); /* frees [method] */
799 } else
800 free_poll_hotkey_buffer(key); /* frees [bus_method]+[method] */
843 kfree(key); 801 kfree(key);
844 printk(KERN_ERR PREFIX "invalid key\n"); 802 printk(KERN_ERR PREFIX "invalid key\n");
845 return -EINVAL; 803 return -EINVAL;
@@ -923,10 +881,9 @@ static ssize_t hotkey_execute_aml_method(struct file *file,
923 union acpi_hotkey *key; 881 union acpi_hotkey *key;
924 882
925 883
926 arg = (char *)kmalloc(count + 1, GFP_KERNEL); 884 arg = kzalloc(count + 1, GFP_KERNEL);
927 if (!arg) 885 if (!arg)
928 return -ENOMEM; 886 return -ENOMEM;
929 arg[count] = 0;
930 887
931 if (copy_from_user(arg, buffer, count)) { 888 if (copy_from_user(arg, buffer, count)) {
932 kfree(arg); 889 kfree(arg);
diff --git a/drivers/acpi/i2c_ec.c b/drivers/acpi/i2c_ec.c
index 84239d51dc0..6809c283ec5 100644
--- a/drivers/acpi/i2c_ec.c
+++ b/drivers/acpi/i2c_ec.c
@@ -330,7 +330,7 @@ static int acpi_ec_hc_add(struct acpi_device *device)
330 status = acpi_evaluate_integer(ec_hc->handle, "_EC", NULL, &val); 330 status = acpi_evaluate_integer(ec_hc->handle, "_EC", NULL, &val);
331 if (ACPI_FAILURE(status)) { 331 if (ACPI_FAILURE(status)) {
332 ACPI_DEBUG_PRINT((ACPI_DB_WARN, "Error obtaining _EC\n")); 332 ACPI_DEBUG_PRINT((ACPI_DB_WARN, "Error obtaining _EC\n"));
333 kfree(ec_hc->smbus); 333 kfree(ec_hc);
334 kfree(smbus); 334 kfree(smbus);
335 return -EIO; 335 return -EIO;
336 } 336 }
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index b7d1514cd19..507f051d1ce 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -746,6 +746,16 @@ acpi_status acpi_os_wait_semaphore(acpi_handle handle, u32 units, u16 timeout)
746 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Waiting for semaphore[%p|%d|%d]\n", 746 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Waiting for semaphore[%p|%d|%d]\n",
747 handle, units, timeout)); 747 handle, units, timeout));
748 748
749 /*
750 * This can be called during resume with interrupts off.
751 * Like boot-time, we should be single threaded and will
752 * always get the lock if we try -- timeout or not.
753 * If this doesn't succeed, then we will oops courtesy of
754 * might_sleep() in down().
755 */
756 if (!down_trylock(sem))
757 return AE_OK;
758
749 switch (timeout) { 759 switch (timeout) {
750 /* 760 /*
751 * No Wait: 761 * No Wait:
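
The comment added to acpi_os_wait_semaphore() explains why the semaphore is first taken with down_trylock(): during resume the caller may run with interrupts off, where the sleeping path would trip might_sleep(). A rough pthread sketch of the same try-nonblocking-first shape; the names and timeout handling are illustrative, not the ACPI OSL interface.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t sem = PTHREAD_MUTEX_INITIALIZER;

/* Analogue of the patched wait path: try a non-blocking grab first,
 * because the blocking path is not allowed in every caller's context. */
static int wait_semaphore(int timeout_ms)
{
    if (pthread_mutex_trylock(&sem) == 0)
        return 0;                    /* got it without sleeping, like down_trylock() */

    if (timeout_ms == 0)
        return -1;                   /* "No Wait" case: give up immediately */

    /* Blocking fallback; only reached from contexts where sleeping is legal. */
    return pthread_mutex_lock(&sem);
}

int main(void)
{
    if (wait_semaphore(100) == 0) {
        puts("semaphore acquired");
        pthread_mutex_unlock(&sem);
    }
    return 0;
}
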
diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c
index db7b350a503..62bef0b3b61 100644
--- a/drivers/acpi/sbs.c
+++ b/drivers/acpi/sbs.c
@@ -1714,6 +1714,9 @@ static int __init acpi_sbs_init(void)
1714{ 1714{
1715 int result = 0; 1715 int result = 0;
1716 1716
1717 if (acpi_disabled)
1718 return -ENODEV;
1719
1717 init_MUTEX(&sbs_sem); 1720 init_MUTEX(&sbs_sem);
1718 1721
1719 if (capacity_mode != DEF_CAPACITY_UNIT 1722 if (capacity_mode != DEF_CAPACITY_UNIT
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 5fcb50c7b77..698a1540e30 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -4,6 +4,7 @@
4 4
5#include <linux/module.h> 5#include <linux/module.h>
6#include <linux/init.h> 6#include <linux/init.h>
7#include <linux/kernel.h>
7#include <linux/acpi.h> 8#include <linux/acpi.h>
8 9
9#include <acpi/acpi_drivers.h> 10#include <acpi/acpi_drivers.h>
@@ -113,6 +114,8 @@ static struct kset acpi_namespace_kset = {
113static void acpi_device_register(struct acpi_device *device, 114static void acpi_device_register(struct acpi_device *device,
114 struct acpi_device *parent) 115 struct acpi_device *parent)
115{ 116{
117 int err;
118
116 /* 119 /*
117 * Linkage 120 * Linkage
118 * ------- 121 * -------
@@ -138,7 +141,10 @@ static void acpi_device_register(struct acpi_device *device,
138 device->kobj.parent = &parent->kobj; 141 device->kobj.parent = &parent->kobj;
139 device->kobj.ktype = &ktype_acpi_ns; 142 device->kobj.ktype = &ktype_acpi_ns;
140 device->kobj.kset = &acpi_namespace_kset; 143 device->kobj.kset = &acpi_namespace_kset;
141 kobject_register(&device->kobj); 144 err = kobject_register(&device->kobj);
145 if (err < 0)
146 printk(KERN_WARNING "%s: kobject_register error: %d\n",
147 __FUNCTION__, err);
142 create_sysfs_device_files(device); 148 create_sysfs_device_files(device);
143} 149}
144 150
@@ -1450,7 +1456,9 @@ static int __init acpi_scan_init(void)
1450 if (acpi_disabled) 1456 if (acpi_disabled)
1451 return 0; 1457 return 0;
1452 1458
1453 kset_register(&acpi_namespace_kset); 1459 result = kset_register(&acpi_namespace_kset);
1460 if (result < 0)
1461 printk(KERN_ERR PREFIX "kset_register error: %d\n", result);
1454 1462
1455 result = bus_register(&acpi_bus_type); 1463 result = bus_register(&acpi_bus_type);
1456 if (result) { 1464 if (result) {
diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c
index f48227f4c8c..d0d84c43a9d 100644
--- a/drivers/acpi/utils.c
+++ b/drivers/acpi/utils.c
@@ -262,7 +262,7 @@ acpi_evaluate_integer(acpi_handle handle,
262 if (!data) 262 if (!data)
263 return AE_BAD_PARAMETER; 263 return AE_BAD_PARAMETER;
264 264
265 element = kmalloc(sizeof(union acpi_object), GFP_KERNEL); 265 element = kmalloc(sizeof(union acpi_object), irqs_disabled() ? GFP_ATOMIC: GFP_KERNEL);
266 if (!element) 266 if (!element)
267 return AE_NO_MEMORY; 267 return AE_NO_MEMORY;
268 268
diff --git a/drivers/base/node.c b/drivers/base/node.c
index d7de1753e09..e9b0957f15d 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -64,7 +64,7 @@ static ssize_t node_read_meminfo(struct sys_device * dev, char * buf)
64 "Node %d Mapped: %8lu kB\n" 64 "Node %d Mapped: %8lu kB\n"
65 "Node %d AnonPages: %8lu kB\n" 65 "Node %d AnonPages: %8lu kB\n"
66 "Node %d PageTables: %8lu kB\n" 66 "Node %d PageTables: %8lu kB\n"
67 "Node %d NFS Unstable: %8lu kB\n" 67 "Node %d NFS_Unstable: %8lu kB\n"
68 "Node %d Bounce: %8lu kB\n" 68 "Node %d Bounce: %8lu kB\n"
69 "Node %d Slab: %8lu kB\n", 69 "Node %d Slab: %8lu kB\n",
70 nid, K(i.totalram), 70 nid, K(i.totalram),
diff --git a/drivers/cdrom/gscd.c b/drivers/cdrom/gscd.c
index b6ee50a2916..fa708248976 100644
--- a/drivers/cdrom/gscd.c
+++ b/drivers/cdrom/gscd.c
@@ -266,7 +266,7 @@ repeat:
266 goto out; 266 goto out;
267 267
268 if (req->cmd != READ) { 268 if (req->cmd != READ) {
269 printk("GSCD: bad cmd %lu\n", rq_data_dir(req)); 269 printk("GSCD: bad cmd %u\n", rq_data_dir(req));
270 end_request(req, 0); 270 end_request(req, 0);
271 goto repeat; 271 goto repeat;
272 } 272 }
diff --git a/drivers/char/drm/radeon_state.c b/drivers/char/drm/radeon_state.c
index 5bb2234a909..39a7f685e3f 100644
--- a/drivers/char/drm/radeon_state.c
+++ b/drivers/char/drm/radeon_state.c
@@ -175,6 +175,14 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
175 } 175 }
176 break; 176 break;
177 177
178 case R200_EMIT_VAP_CTL:{
179 RING_LOCALS;
180 BEGIN_RING(2);
181 OUT_RING_REG(RADEON_SE_TCL_STATE_FLUSH, 0);
182 ADVANCE_RING();
183 }
184 break;
185
178 case RADEON_EMIT_RB3D_COLORPITCH: 186 case RADEON_EMIT_RB3D_COLORPITCH:
179 case RADEON_EMIT_RE_LINE_PATTERN: 187 case RADEON_EMIT_RE_LINE_PATTERN:
180 case RADEON_EMIT_SE_LINE_WIDTH: 188 case RADEON_EMIT_SE_LINE_WIDTH:
@@ -202,7 +210,6 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
202 case R200_EMIT_TCL_LIGHT_MODEL_CTL_0: 210 case R200_EMIT_TCL_LIGHT_MODEL_CTL_0:
203 case R200_EMIT_TFACTOR_0: 211 case R200_EMIT_TFACTOR_0:
204 case R200_EMIT_VTX_FMT_0: 212 case R200_EMIT_VTX_FMT_0:
205 case R200_EMIT_VAP_CTL:
206 case R200_EMIT_MATRIX_SELECT_0: 213 case R200_EMIT_MATRIX_SELECT_0:
207 case R200_EMIT_TEX_PROC_CTL_2: 214 case R200_EMIT_TEX_PROC_CTL_2:
208 case R200_EMIT_TCL_UCP_VERT_BLEND_CTL: 215 case R200_EMIT_TCL_UCP_VERT_BLEND_CTL:
diff --git a/drivers/char/moxa.c b/drivers/char/moxa.c
index 4ea7bd5f4f5..a369dd6877d 100644
--- a/drivers/char/moxa.c
+++ b/drivers/char/moxa.c
@@ -142,6 +142,7 @@ typedef struct _moxa_board_conf {
142 142
143static moxa_board_conf moxa_boards[MAX_BOARDS]; 143static moxa_board_conf moxa_boards[MAX_BOARDS];
144static void __iomem *moxaBaseAddr[MAX_BOARDS]; 144static void __iomem *moxaBaseAddr[MAX_BOARDS];
145static int loadstat[MAX_BOARDS];
145 146
146struct moxa_str { 147struct moxa_str {
147 int type; 148 int type;
@@ -1688,6 +1689,8 @@ int MoxaDriverPoll(void)
1688 if (moxaCard == 0) 1689 if (moxaCard == 0)
1689 return (-1); 1690 return (-1);
1690 for (card = 0; card < MAX_BOARDS; card++) { 1691 for (card = 0; card < MAX_BOARDS; card++) {
1692 if (loadstat[card] == 0)
1693 continue;
1691 if ((ports = moxa_boards[card].numPorts) == 0) 1694 if ((ports = moxa_boards[card].numPorts) == 0)
1692 continue; 1695 continue;
1693 if (readb(moxaIntPend[card]) == 0xff) { 1696 if (readb(moxaIntPend[card]) == 0xff) {
@@ -2903,6 +2906,7 @@ static int moxaloadcode(int cardno, unsigned char __user *tmp, int len)
2903 } 2906 }
2904 break; 2907 break;
2905 } 2908 }
2909 loadstat[cardno] = 1;
2906 return (0); 2910 return (0);
2907} 2911}
2908 2912
@@ -2920,7 +2924,7 @@ static int moxaloadc218(int cardno, void __iomem *baseAddr, int len)
2920 len1 = len >> 1; 2924 len1 = len >> 1;
2921 ptr = (ushort *) moxaBuff; 2925 ptr = (ushort *) moxaBuff;
2922 for (i = 0; i < len1; i++) 2926 for (i = 0; i < len1; i++)
2923 usum += *(ptr + i); 2927 usum += le16_to_cpu(*(ptr + i));
2924 retry = 0; 2928 retry = 0;
2925 do { 2929 do {
2926 len1 = len >> 1; 2930 len1 = len >> 1;
@@ -2992,7 +2996,7 @@ static int moxaloadc320(int cardno, void __iomem *baseAddr, int len, int *numPor
2992 wlen = len >> 1; 2996 wlen = len >> 1;
2993 uptr = (ushort *) moxaBuff; 2997 uptr = (ushort *) moxaBuff;
2994 for (i = 0; i < wlen; i++) 2998 for (i = 0; i < wlen; i++)
2995 usum += uptr[i]; 2999 usum += le16_to_cpu(uptr[i]);
2996 retry = 0; 3000 retry = 0;
2997 j = 0; 3001 j = 0;
2998 do { 3002 do {
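
The moxa.c change wraps each firmware word in le16_to_cpu() so the checksum comes out the same on big-endian hosts. The standalone version below builds each 16-bit little-endian word from bytes, which is the portable equivalent of that conversion; the function name and sample data are only for the example.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Sum 16-bit little-endian words, independent of host byte order.
 * Equivalent to: for each word w in buf, usum += le16_to_cpu(w); */
static uint16_t le16_checksum(const unsigned char *buf, size_t len)
{
    uint16_t usum = 0;
    size_t i;

    for (i = 0; i + 1 < len; i += 2)
        usum += (uint16_t)(buf[i] | (buf[i + 1] << 8));
    return usum;
}

int main(void)
{
    /* 0x0201 + 0x0403 = 0x0604 regardless of the host's endianness. */
    unsigned char fw[] = { 0x01, 0x02, 0x03, 0x04 };

    printf("checksum = 0x%04x\n", le16_checksum(fw, sizeof(fw)));
    return 0;
}
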
diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
index bfdb90242a9..bb0d9199e99 100644
--- a/drivers/char/tty_io.c
+++ b/drivers/char/tty_io.c
@@ -153,6 +153,15 @@ int tty_ioctl(struct inode * inode, struct file * file,
153static int tty_fasync(int fd, struct file * filp, int on); 153static int tty_fasync(int fd, struct file * filp, int on);
154static void release_mem(struct tty_struct *tty, int idx); 154static void release_mem(struct tty_struct *tty, int idx);
155 155
156/**
157 * alloc_tty_struct - allocate a tty object
158 *
159 * Return a new empty tty structure. The data fields have not
 160 * been initialized in any way but have been zeroed
161 *
162 * Locking: none
163 * FIXME: use kzalloc
164 */
156 165
157static struct tty_struct *alloc_tty_struct(void) 166static struct tty_struct *alloc_tty_struct(void)
158{ 167{
@@ -166,6 +175,15 @@ static struct tty_struct *alloc_tty_struct(void)
166 175
167static void tty_buffer_free_all(struct tty_struct *); 176static void tty_buffer_free_all(struct tty_struct *);
168 177
178/**
179 * free_tty_struct - free a disused tty
180 * @tty: tty struct to free
181 *
182 * Free the write buffers, tty queue and tty memory itself.
183 *
184 * Locking: none. Must be called after tty is definitely unused
185 */
186
169static inline void free_tty_struct(struct tty_struct *tty) 187static inline void free_tty_struct(struct tty_struct *tty)
170{ 188{
171 kfree(tty->write_buf); 189 kfree(tty->write_buf);
@@ -175,6 +193,17 @@ static inline void free_tty_struct(struct tty_struct *tty)
175 193
176#define TTY_NUMBER(tty) ((tty)->index + (tty)->driver->name_base) 194#define TTY_NUMBER(tty) ((tty)->index + (tty)->driver->name_base)
177 195
196/**
197 * tty_name - return tty naming
198 * @tty: tty structure
199 * @buf: buffer for output
200 *
201 * Convert a tty structure into a name. The name reflects the kernel
202 * naming policy and if udev is in use may not reflect user space
203 *
204 * Locking: none
205 */
206
178char *tty_name(struct tty_struct *tty, char *buf) 207char *tty_name(struct tty_struct *tty, char *buf)
179{ 208{
180 if (!tty) /* Hmm. NULL pointer. That's fun. */ 209 if (!tty) /* Hmm. NULL pointer. That's fun. */
@@ -235,6 +264,28 @@ static int check_tty_count(struct tty_struct *tty, const char *routine)
235 * Tty buffer allocation management 264 * Tty buffer allocation management
236 */ 265 */
237 266
267
268/**
269 * tty_buffer_free_all - free buffers used by a tty
270 * @tty: tty to free from
271 *
272 * Remove all the buffers pending on a tty whether queued with data
273 * or in the free ring. Must be called when the tty is no longer in use
274 *
275 * Locking: none
276 */
277
278
238static void tty_buffer_free_all(struct tty_struct *tty) 289static void tty_buffer_free_all(struct tty_struct *tty)
239{ 290{
240 struct tty_buffer *thead; 291 struct tty_buffer *thead;
@@ -247,19 +298,47 @@ static void tty_buffer_free_all(struct tty_struct *tty)
247 kfree(thead); 298 kfree(thead);
248 } 299 }
249 tty->buf.tail = NULL; 300 tty->buf.tail = NULL;
301 tty->buf.memory_used = 0;
250} 302}
251 303
304/**
305 * tty_buffer_init - prepare a tty buffer structure
306 * @tty: tty to initialise
307 *
308 * Set up the initial state of the buffer management for a tty device.
309 * Must be called before the other tty buffer functions are used.
310 *
311 * Locking: none
312 */
313
252static void tty_buffer_init(struct tty_struct *tty) 314static void tty_buffer_init(struct tty_struct *tty)
253{ 315{
254 spin_lock_init(&tty->buf.lock); 316 spin_lock_init(&tty->buf.lock);
255 tty->buf.head = NULL; 317 tty->buf.head = NULL;
256 tty->buf.tail = NULL; 318 tty->buf.tail = NULL;
257 tty->buf.free = NULL; 319 tty->buf.free = NULL;
320 tty->buf.memory_used = 0;
258} 321}
259 322
260static struct tty_buffer *tty_buffer_alloc(size_t size) 323/**
324 * tty_buffer_alloc - allocate a tty buffer
325 * @tty: tty device
326 * @size: desired size (characters)
327 *
328 * Allocate a new tty buffer to hold the desired number of characters.
329 * Return NULL if out of memory or the allocation would exceed the
330 * per device queue
331 *
332 * Locking: Caller must hold tty->buf.lock
333 */
334
335static struct tty_buffer *tty_buffer_alloc(struct tty_struct *tty, size_t size)
261{ 336{
262 struct tty_buffer *p = kmalloc(sizeof(struct tty_buffer) + 2 * size, GFP_ATOMIC); 337 struct tty_buffer *p;
338
339 if (tty->buf.memory_used + size > 65536)
340 return NULL;
341 p = kmalloc(sizeof(struct tty_buffer) + 2 * size, GFP_ATOMIC);
263 if(p == NULL) 342 if(p == NULL)
264 return NULL; 343 return NULL;
265 p->used = 0; 344 p->used = 0;
@@ -269,17 +348,27 @@ static struct tty_buffer *tty_buffer_alloc(size_t size)
269 p->read = 0; 348 p->read = 0;
270 p->char_buf_ptr = (char *)(p->data); 349 p->char_buf_ptr = (char *)(p->data);
271 p->flag_buf_ptr = (unsigned char *)p->char_buf_ptr + size; 350 p->flag_buf_ptr = (unsigned char *)p->char_buf_ptr + size;
272/* printk("Flip create %p\n", p); */ 351 tty->buf.memory_used += size;
273 return p; 352 return p;
274} 353}
275 354
276/* Must be called with the tty_read lock held. This needs to acquire strategy 355/**
277 code to decide if we should kfree or relink a given expired buffer */ 356 * tty_buffer_free - free a tty buffer
357 * @tty: tty owning the buffer
358 * @b: the buffer to free
359 *
360 * Free a tty buffer, or add it to the free list according to our
361 * internal strategy
362 *
363 * Locking: Caller must hold tty->buf.lock
364 */
278 365
279static void tty_buffer_free(struct tty_struct *tty, struct tty_buffer *b) 366static void tty_buffer_free(struct tty_struct *tty, struct tty_buffer *b)
280{ 367{
281 /* Dumb strategy for now - should keep some stats */ 368 /* Dumb strategy for now - should keep some stats */
282/* printk("Flip dispose %p\n", b); */ 369 tty->buf.memory_used -= b->size;
370 WARN_ON(tty->buf.memory_used < 0);
371
283 if(b->size >= 512) 372 if(b->size >= 512)
284 kfree(b); 373 kfree(b);
285 else { 374 else {
@@ -288,6 +377,18 @@ static void tty_buffer_free(struct tty_struct *tty, struct tty_buffer *b)
288 } 377 }
289} 378}
290 379
380/**
381 * tty_buffer_find - find a free tty buffer
382 * @tty: tty owning the buffer
383 * @size: characters wanted
384 *
385 * Locate an existing suitable tty buffer or if we are lacking one then
386 * allocate a new one. We round our buffers off in 256 character chunks
387 * to get better allocation behaviour.
388 *
389 * Locking: Caller must hold tty->buf.lock
390 */
391
291static struct tty_buffer *tty_buffer_find(struct tty_struct *tty, size_t size) 392static struct tty_buffer *tty_buffer_find(struct tty_struct *tty, size_t size)
292{ 393{
293 struct tty_buffer **tbh = &tty->buf.free; 394 struct tty_buffer **tbh = &tty->buf.free;
@@ -299,20 +400,28 @@ static struct tty_buffer *tty_buffer_find(struct tty_struct *tty, size_t size)
299 t->used = 0; 400 t->used = 0;
300 t->commit = 0; 401 t->commit = 0;
301 t->read = 0; 402 t->read = 0;
302 /* DEBUG ONLY */ 403 tty->buf.memory_used += t->size;
303/* memset(t->data, '*', size); */
304/* printk("Flip recycle %p\n", t); */
305 return t; 404 return t;
306 } 405 }
307 tbh = &((*tbh)->next); 406 tbh = &((*tbh)->next);
308 } 407 }
309 /* Round the buffer size out */ 408 /* Round the buffer size out */
310 size = (size + 0xFF) & ~ 0xFF; 409 size = (size + 0xFF) & ~ 0xFF;
311 return tty_buffer_alloc(size); 410 return tty_buffer_alloc(tty, size);
312 /* Should possibly check if this fails for the largest buffer we 411 /* Should possibly check if this fails for the largest buffer we
313 have queued and recycle that ? */ 412 have queued and recycle that ? */
314} 413}
315 414
415/**
416 * tty_buffer_request_room - grow tty buffer if needed
417 * @tty: tty structure
418 * @size: size desired
419 *
420 * Make at least size bytes of linear space available for the tty
421 * buffer. If we fail return the size we managed to find.
422 *
423 * Locking: Takes tty->buf.lock
424 */
316int tty_buffer_request_room(struct tty_struct *tty, size_t size) 425int tty_buffer_request_room(struct tty_struct *tty, size_t size)
317{ 426{
318 struct tty_buffer *b, *n; 427 struct tty_buffer *b, *n;
@@ -347,6 +456,18 @@ int tty_buffer_request_room(struct tty_struct *tty, size_t size)
347} 456}
348EXPORT_SYMBOL_GPL(tty_buffer_request_room); 457EXPORT_SYMBOL_GPL(tty_buffer_request_room);
349 458
459/**
460 * tty_insert_flip_string - Add characters to the tty buffer
461 * @tty: tty structure
462 * @chars: characters
463 * @size: size
464 *
465 * Queue a series of bytes to the tty buffering. All the characters
466 * passed are marked as without error. Returns the number added.
467 *
468 * Locking: Called functions may take tty->buf.lock
469 */
470
350int tty_insert_flip_string(struct tty_struct *tty, const unsigned char *chars, 471int tty_insert_flip_string(struct tty_struct *tty, const unsigned char *chars,
351 size_t size) 472 size_t size)
352{ 473{
@@ -370,6 +491,20 @@ int tty_insert_flip_string(struct tty_struct *tty, const unsigned char *chars,
370} 491}
371EXPORT_SYMBOL(tty_insert_flip_string); 492EXPORT_SYMBOL(tty_insert_flip_string);
372 493
494/**
495 * tty_insert_flip_string_flags - Add characters to the tty buffer
496 * @tty: tty structure
497 * @chars: characters
498 * @flags: flag bytes
499 * @size: size
500 *
501 * Queue a series of bytes to the tty buffering. For each character
502 * the flags array indicates the status of the character. Returns the
503 * number added.
504 *
505 * Locking: Called functions may take tty->buf.lock
506 */
507
373int tty_insert_flip_string_flags(struct tty_struct *tty, 508int tty_insert_flip_string_flags(struct tty_struct *tty,
374 const unsigned char *chars, const char *flags, size_t size) 509 const unsigned char *chars, const char *flags, size_t size)
375{ 510{
@@ -394,6 +529,17 @@ int tty_insert_flip_string_flags(struct tty_struct *tty,
394} 529}
395EXPORT_SYMBOL(tty_insert_flip_string_flags); 530EXPORT_SYMBOL(tty_insert_flip_string_flags);
396 531
532/**
533 * tty_schedule_flip - push characters to ldisc
534 * @tty: tty to push from
535 *
536 * Takes any pending buffers and transfers their ownership to the
537 * ldisc side of the queue. It then schedules those characters for
538 * processing by the line discipline.
539 *
540 * Locking: Takes tty->buf.lock
541 */
542
397void tty_schedule_flip(struct tty_struct *tty) 543void tty_schedule_flip(struct tty_struct *tty)
398{ 544{
399 unsigned long flags; 545 unsigned long flags;
@@ -405,12 +551,19 @@ void tty_schedule_flip(struct tty_struct *tty)
405} 551}
406EXPORT_SYMBOL(tty_schedule_flip); 552EXPORT_SYMBOL(tty_schedule_flip);
407 553
408/* 554/**
555 * tty_prepare_flip_string - make room for characters
556 * @tty: tty
557 * @chars: return pointer for character write area
558 * @size: desired size
559 *
409 * Prepare a block of space in the buffer for data. Returns the length 560 * Prepare a block of space in the buffer for data. Returns the length
410 * available and buffer pointer to the space which is now allocated and 561 * available and buffer pointer to the space which is now allocated and
411 * accounted for as ready for normal characters. This is used for drivers 562 * accounted for as ready for normal characters. This is used for drivers
412 * that need their own block copy routines into the buffer. There is no 563 * that need their own block copy routines into the buffer. There is no
413 * guarantee the buffer is a DMA target! 564 * guarantee the buffer is a DMA target!
565 *
566 * Locking: May call functions taking tty->buf.lock
414 */ 567 */
415 568
416int tty_prepare_flip_string(struct tty_struct *tty, unsigned char **chars, size_t size) 569int tty_prepare_flip_string(struct tty_struct *tty, unsigned char **chars, size_t size)
@@ -427,12 +580,20 @@ int tty_prepare_flip_string(struct tty_struct *tty, unsigned char **chars, size_
427 580
428EXPORT_SYMBOL_GPL(tty_prepare_flip_string); 581EXPORT_SYMBOL_GPL(tty_prepare_flip_string);
429 582
430/* 583/**
584 * tty_prepare_flip_string_flags - make room for characters
585 * @tty: tty
586 * @chars: return pointer for character write area
587 * @flags: return pointer for status flag write area
588 * @size: desired size
589 *
431 * Prepare a block of space in the buffer for data. Returns the length 590 * Prepare a block of space in the buffer for data. Returns the length
432 * available and buffer pointer to the space which is now allocated and 591 * available and buffer pointer to the space which is now allocated and
433 * accounted for as ready for characters. This is used for drivers 592 * accounted for as ready for characters. This is used for drivers
434 * that need their own block copy routines into the buffer. There is no 593 * that need their own block copy routines into the buffer. There is no
435 * guarantee the buffer is a DMA target! 594 * guarantee the buffer is a DMA target!
595 *
596 * Locking: May call functions taking tty->buf.lock
436 */ 597 */
437 598
438int tty_prepare_flip_string_flags(struct tty_struct *tty, unsigned char **chars, char **flags, size_t size) 599int tty_prepare_flip_string_flags(struct tty_struct *tty, unsigned char **chars, char **flags, size_t size)
@@ -451,10 +612,16 @@ EXPORT_SYMBOL_GPL(tty_prepare_flip_string_flags);
451 612
452 613
453 614
454/* 615/**
616 * tty_set_termios_ldisc - set ldisc field
617 * @tty: tty structure
618 * @num: line discipline number
619 *
455 * This is probably overkill for real world processors but 620 * This is probably overkill for real world processors but
456 * they are not on hot paths so a little discipline won't do 621 * they are not on hot paths so a little discipline won't do
457 * any harm. 622 * any harm.
623 *
624 * Locking: takes termios_sem
458 */ 625 */
459 626
460static void tty_set_termios_ldisc(struct tty_struct *tty, int num) 627static void tty_set_termios_ldisc(struct tty_struct *tty, int num)
@@ -474,6 +641,19 @@ static DEFINE_SPINLOCK(tty_ldisc_lock);
474static DECLARE_WAIT_QUEUE_HEAD(tty_ldisc_wait); 641static DECLARE_WAIT_QUEUE_HEAD(tty_ldisc_wait);
475static struct tty_ldisc tty_ldiscs[NR_LDISCS]; /* line disc dispatch table */ 642static struct tty_ldisc tty_ldiscs[NR_LDISCS]; /* line disc dispatch table */
476 643
644/**
645 * tty_register_ldisc - install a line discipline
646 * @disc: ldisc number
647 * @new_ldisc: pointer to the ldisc object
648 *
649 * Installs a new line discipline into the kernel. The discipline
650 * is set up as unreferenced and then made available to the kernel
651 * from this point onwards.
652 *
653 * Locking:
654 * takes tty_ldisc_lock to guard against ldisc races
655 */
656
477int tty_register_ldisc(int disc, struct tty_ldisc *new_ldisc) 657int tty_register_ldisc(int disc, struct tty_ldisc *new_ldisc)
478{ 658{
479 unsigned long flags; 659 unsigned long flags;
@@ -493,6 +673,18 @@ int tty_register_ldisc(int disc, struct tty_ldisc *new_ldisc)
493} 673}
494EXPORT_SYMBOL(tty_register_ldisc); 674EXPORT_SYMBOL(tty_register_ldisc);
495 675
676/**
677 * tty_unregister_ldisc - unload a line discipline
678 * @disc: ldisc number
679 * @new_ldisc: pointer to the ldisc object
680 *
681 * Remove a line discipline from the kernel providing it is not
682 * currently in use.
683 *
684 * Locking:
685 * takes tty_ldisc_lock to guard against ldisc races
686 */
687
496int tty_unregister_ldisc(int disc) 688int tty_unregister_ldisc(int disc)
497{ 689{
498 unsigned long flags; 690 unsigned long flags;
@@ -512,6 +704,19 @@ int tty_unregister_ldisc(int disc)
512} 704}
513EXPORT_SYMBOL(tty_unregister_ldisc); 705EXPORT_SYMBOL(tty_unregister_ldisc);
514 706
707/**
708 * tty_ldisc_get - take a reference to an ldisc
709 * @disc: ldisc number
710 *
711 * Takes a reference to a line discipline. Deals with refcounts and
712 * module locking counts. Returns NULL if the discipline is not available.
713 * Returns a pointer to the discipline and bumps the ref count if it is
714 * available
715 *
716 * Locking:
717 * takes tty_ldisc_lock to guard against ldisc races
718 */
719
515struct tty_ldisc *tty_ldisc_get(int disc) 720struct tty_ldisc *tty_ldisc_get(int disc)
516{ 721{
517 unsigned long flags; 722 unsigned long flags;
@@ -540,6 +745,17 @@ struct tty_ldisc *tty_ldisc_get(int disc)
540 745
541EXPORT_SYMBOL_GPL(tty_ldisc_get); 746EXPORT_SYMBOL_GPL(tty_ldisc_get);
542 747
748/**
749 * tty_ldisc_put - drop ldisc reference
750 * @disc: ldisc number
751 *
752 * Drop a reference to a line discipline. Manage refcounts and
753 * module usage counts
754 *
755 * Locking:
756 * takes tty_ldisc_lock to guard against ldisc races
757 */
758
543void tty_ldisc_put(int disc) 759void tty_ldisc_put(int disc)
544{ 760{
545 struct tty_ldisc *ld; 761 struct tty_ldisc *ld;
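
The new kernel-doc for tty_ldisc_get() and tty_ldisc_put() describes taking and dropping references on a dispatch-table entry under tty_ldisc_lock. Below is a minimal sketch of that pattern with a pthread mutex standing in for the spinlock; the table layout and function names are invented for the example and omit the module refcounting the real code also does.

#include <pthread.h>
#include <stdio.h>

#define NR_LDISCS 4

struct ldisc {
    int registered;
    int refcount;
};

static struct ldisc ldiscs[NR_LDISCS];
static pthread_mutex_t ldisc_lock = PTHREAD_MUTEX_INITIALIZER;

/* Take a reference on entry 'disc'; NULL if it is out of range or unregistered. */
static struct ldisc *ldisc_get(int disc)
{
    struct ldisc *ld = NULL;

    if (disc < 0 || disc >= NR_LDISCS)
        return NULL;
    pthread_mutex_lock(&ldisc_lock);
    if (ldiscs[disc].registered) {
        ld = &ldiscs[disc];
        ld->refcount++;
    }
    pthread_mutex_unlock(&ldisc_lock);
    return ld;
}

/* Drop a reference previously taken with ldisc_get(). */
static void ldisc_put(int disc)
{
    pthread_mutex_lock(&ldisc_lock);
    if (ldiscs[disc].refcount > 0)
        ldiscs[disc].refcount--;
    pthread_mutex_unlock(&ldisc_lock);
}

int main(void)
{
    ldiscs[0].registered = 1;
    if (ldisc_get(0))
        printf("refcount now %d\n", ldiscs[0].refcount);
    ldisc_put(0);
    printf("refcount after put: %d\n", ldiscs[0].refcount);
    return 0;
}
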
@@ -557,6 +773,19 @@ void tty_ldisc_put(int disc)
557 773
558EXPORT_SYMBOL_GPL(tty_ldisc_put); 774EXPORT_SYMBOL_GPL(tty_ldisc_put);
559 775
776/**
777 * tty_ldisc_assign - set ldisc on a tty
778 * @tty: tty to assign
779 * @ld: line discipline
780 *
781 * Install an instance of a line discipline into a tty structure. The
 782 * ldisc must have a reference count above zero to ensure it remains.
783 * The tty instance refcount starts at zero.
784 *
785 * Locking:
786 * Caller must hold references
787 */
788
560static void tty_ldisc_assign(struct tty_struct *tty, struct tty_ldisc *ld) 789static void tty_ldisc_assign(struct tty_struct *tty, struct tty_ldisc *ld)
561{ 790{
562 tty->ldisc = *ld; 791 tty->ldisc = *ld;
@@ -571,6 +800,8 @@ static void tty_ldisc_assign(struct tty_struct *tty, struct tty_ldisc *ld)
571 * the tty ldisc. Return 0 on failure or 1 on success. This is 800 * the tty ldisc. Return 0 on failure or 1 on success. This is
572 * used to implement both the waiting and non waiting versions 801 * used to implement both the waiting and non waiting versions
573 * of tty_ldisc_ref 802 * of tty_ldisc_ref
803 *
804 * Locking: takes tty_ldisc_lock
574 */ 805 */
575 806
576static int tty_ldisc_try(struct tty_struct *tty) 807static int tty_ldisc_try(struct tty_struct *tty)
@@ -602,6 +833,8 @@ static int tty_ldisc_try(struct tty_struct *tty)
602 * must also be careful not to hold other locks that will deadlock 833 * must also be careful not to hold other locks that will deadlock
603 * against a discipline change, such as an existing ldisc reference 834 * against a discipline change, such as an existing ldisc reference
604 * (which we check for) 835 * (which we check for)
836 *
837 * Locking: call functions take tty_ldisc_lock
605 */ 838 */
606 839
607struct tty_ldisc *tty_ldisc_ref_wait(struct tty_struct *tty) 840struct tty_ldisc *tty_ldisc_ref_wait(struct tty_struct *tty)
@@ -622,6 +855,8 @@ EXPORT_SYMBOL_GPL(tty_ldisc_ref_wait);
622 * Dereference the line discipline for the terminal and take a 855 * Dereference the line discipline for the terminal and take a
623 * reference to it. If the line discipline is in flux then 856 * reference to it. If the line discipline is in flux then
624 * return NULL. Can be called from IRQ and timer functions. 857 * return NULL. Can be called from IRQ and timer functions.
858 *
859 * Locking: called functions take tty_ldisc_lock
625 */ 860 */
626 861
627struct tty_ldisc *tty_ldisc_ref(struct tty_struct *tty) 862struct tty_ldisc *tty_ldisc_ref(struct tty_struct *tty)
@@ -639,6 +874,8 @@ EXPORT_SYMBOL_GPL(tty_ldisc_ref);
639 * 874 *
640 * Undoes the effect of tty_ldisc_ref or tty_ldisc_ref_wait. May 875 * Undoes the effect of tty_ldisc_ref or tty_ldisc_ref_wait. May
641 * be called in IRQ context. 876 * be called in IRQ context.
877 *
878 * Locking: takes tty_ldisc_lock
642 */ 879 */
643 880
644void tty_ldisc_deref(struct tty_ldisc *ld) 881void tty_ldisc_deref(struct tty_ldisc *ld)
@@ -683,6 +920,9 @@ static void tty_ldisc_enable(struct tty_struct *tty)
683 * 920 *
684 * Set the discipline of a tty line. Must be called from a process 921 * Set the discipline of a tty line. Must be called from a process
685 * context. 922 * context.
923 *
924 * Locking: takes tty_ldisc_lock.
925 * called functions take termios_sem
686 */ 926 */
687 927
688static int tty_set_ldisc(struct tty_struct *tty, int ldisc) 928static int tty_set_ldisc(struct tty_struct *tty, int ldisc)
@@ -846,9 +1086,17 @@ restart:
846 return retval; 1086 return retval;
847} 1087}
848 1088
849/* 1089/**
850 * This routine returns a tty driver structure, given a device number 1090 * get_tty_driver - find device of a tty
1091 * @dev_t: device identifier
1092 * @index: returns the index of the tty
1093 *
1094 * This routine returns a tty driver structure, given a device number
1095 * and also passes back the index number.
1096 *
1097 * Locking: caller must hold tty_mutex
851 */ 1098 */
1099
852static struct tty_driver *get_tty_driver(dev_t device, int *index) 1100static struct tty_driver *get_tty_driver(dev_t device, int *index)
853{ 1101{
854 struct tty_driver *p; 1102 struct tty_driver *p;
@@ -863,11 +1111,17 @@ static struct tty_driver *get_tty_driver(dev_t device, int *index)
863 return NULL; 1111 return NULL;
864} 1112}
865 1113
866/* 1114/**
867 * If we try to write to, or set the state of, a terminal and we're 1115 * tty_check_change - check for POSIX terminal changes
868 * not in the foreground, send a SIGTTOU. If the signal is blocked or 1116 * @tty: tty to check
869 * ignored, go ahead and perform the operation. (POSIX 7.2) 1117 *
1118 * If we try to write to, or set the state of, a terminal and we're
1119 * not in the foreground, send a SIGTTOU. If the signal is blocked or
1120 * ignored, go ahead and perform the operation. (POSIX 7.2)
1121 *
1122 * Locking: none
870 */ 1123 */
1124
871int tty_check_change(struct tty_struct * tty) 1125int tty_check_change(struct tty_struct * tty)
872{ 1126{
873 if (current->signal->tty != tty) 1127 if (current->signal->tty != tty)
@@ -1005,10 +1259,27 @@ void tty_ldisc_flush(struct tty_struct *tty)
1005 1259
1006EXPORT_SYMBOL_GPL(tty_ldisc_flush); 1260EXPORT_SYMBOL_GPL(tty_ldisc_flush);
1007 1261
1008/* 1262/**
1009 * This can be called by the "eventd" kernel thread. That is process synchronous, 1263 * do_tty_hangup - actual handler for hangup events
1010 * but doesn't hold any locks, so we need to make sure we have the appropriate 1264 * @data: tty device
1011 * locks for what we're doing.. 1265 *
1266 * This can be called by the "eventd" kernel thread. That is process
1267 * synchronous but doesn't hold any locks, so we need to make sure we
1268 * have the appropriate locks for what we're doing.
1269 *
1270 * The hangup event clears any pending redirections onto the hung up
1271 * device. It ensures future writes will error and it does the needed
1272 * line discipline hangup and signal delivery. The tty object itself
1273 * remains intact.
1274 *
1275 * Locking:
1276 * BKL
1277 * redirect lock for undoing redirection
1278 * file list lock for manipulating list of ttys
1279 * tty_ldisc_lock from called functions
1280 * termios_sem resetting termios data
1281 * tasklist_lock to walk task list for hangup event
1282 *
1012 */ 1283 */
1013static void do_tty_hangup(void *data) 1284static void do_tty_hangup(void *data)
1014{ 1285{
@@ -1133,6 +1404,14 @@ static void do_tty_hangup(void *data)
1133 fput(f); 1404 fput(f);
1134} 1405}
1135 1406
1407/**
1408 * tty_hangup - trigger a hangup event
1409 * @tty: tty to hangup
1410 *
 1411 * A carrier loss (virtual or otherwise) has occurred on this line;
1412 * schedule a hangup sequence to run after this event.
1413 */
1414
1136void tty_hangup(struct tty_struct * tty) 1415void tty_hangup(struct tty_struct * tty)
1137{ 1416{
1138#ifdef TTY_DEBUG_HANGUP 1417#ifdef TTY_DEBUG_HANGUP
@@ -1145,6 +1424,15 @@ void tty_hangup(struct tty_struct * tty)
1145 1424
1146EXPORT_SYMBOL(tty_hangup); 1425EXPORT_SYMBOL(tty_hangup);
1147 1426
1427/**
1428 * tty_vhangup - process vhangup
1429 * @tty: tty to hangup
1430 *
1431 * The user has asked via system call for the terminal to be hung up.
1432 * We do this synchronously so that when the syscall returns the process
 1433 * is complete. That guarantee is necessary for security reasons.
1434 */
1435
1148void tty_vhangup(struct tty_struct * tty) 1436void tty_vhangup(struct tty_struct * tty)
1149{ 1437{
1150#ifdef TTY_DEBUG_HANGUP 1438#ifdef TTY_DEBUG_HANGUP
@@ -1156,6 +1444,14 @@ void tty_vhangup(struct tty_struct * tty)
1156} 1444}
1157EXPORT_SYMBOL(tty_vhangup); 1445EXPORT_SYMBOL(tty_vhangup);
1158 1446
1447/**
1448 * tty_hung_up_p - was tty hung up
1449 * @filp: file pointer of tty
1450 *
1451 * Return true if the tty has been subject to a vhangup or a carrier
1452 * loss
1453 */
1454
1159int tty_hung_up_p(struct file * filp) 1455int tty_hung_up_p(struct file * filp)
1160{ 1456{
1161 return (filp->f_op == &hung_up_tty_fops); 1457 return (filp->f_op == &hung_up_tty_fops);
@@ -1163,19 +1459,28 @@ int tty_hung_up_p(struct file * filp)
1163 1459
1164EXPORT_SYMBOL(tty_hung_up_p); 1460EXPORT_SYMBOL(tty_hung_up_p);
1165 1461
1166/* 1462/**
1167 * This function is typically called only by the session leader, when 1463 * disassociate_ctty - disconnect controlling tty
1168 * it wants to disassociate itself from its controlling tty. 1464 * @on_exit: true if exiting so need to "hang up" the session
1465 *
1466 * This function is typically called only by the session leader, when
1467 * it wants to disassociate itself from its controlling tty.
1169 * 1468 *
1170 * It performs the following functions: 1469 * It performs the following functions:
1171 * (1) Sends a SIGHUP and SIGCONT to the foreground process group 1470 * (1) Sends a SIGHUP and SIGCONT to the foreground process group
1172 * (2) Clears the tty from being controlling the session 1471 * (2) Clears the tty from being controlling the session
1173 * (3) Clears the controlling tty for all processes in the 1472 * (3) Clears the controlling tty for all processes in the
1174 * session group. 1473 * session group.
1175 * 1474 *
1176 * The argument on_exit is set to 1 if called when a process is 1475 * The argument on_exit is set to 1 if called when a process is
1177 * exiting; it is 0 if called by the ioctl TIOCNOTTY. 1476 * exiting; it is 0 if called by the ioctl TIOCNOTTY.
1477 *
1478 * Locking: tty_mutex is taken to protect current->signal->tty
1479 * BKL is taken for hysterical raisins
1480 * Tasklist lock is taken (under tty_mutex) to walk process
1481 * lists for the session.
1178 */ 1482 */
1483
1179void disassociate_ctty(int on_exit) 1484void disassociate_ctty(int on_exit)
1180{ 1485{
1181 struct tty_struct *tty; 1486 struct tty_struct *tty;
@@ -1222,6 +1527,25 @@ void disassociate_ctty(int on_exit)
1222 unlock_kernel(); 1527 unlock_kernel();
1223} 1528}
1224 1529
1530
1531/**
 1532 * stop_tty - propagate flow control
1533 * @tty: tty to stop
1534 *
1535 * Perform flow control to the driver. For PTY/TTY pairs we
 1536 * must also propagate the TIOCPKT status. May be called
1537 * on an already stopped device and will not re-call the driver
1538 * method.
1539 *
1540 * This functionality is used by both the line disciplines for
1541 * halting incoming flow and by the driver. It may therefore be
1542 * called from any context, may be under the tty atomic_write_lock
1543 * but not always.
1544 *
1545 * Locking:
1546 * Broken. Relies on BKL which is unsafe here.
1547 */
1548
1225void stop_tty(struct tty_struct *tty) 1549void stop_tty(struct tty_struct *tty)
1226{ 1550{
1227 if (tty->stopped) 1551 if (tty->stopped)
@@ -1238,6 +1562,19 @@ void stop_tty(struct tty_struct *tty)
1238 1562
1239EXPORT_SYMBOL(stop_tty); 1563EXPORT_SYMBOL(stop_tty);
1240 1564
1565/**
 1566 * start_tty - propagate flow control
1567 * @tty: tty to start
1568 *
1569 * Start a tty that has been stopped if at all possible. Perform
 1570 * any necessary wakeups and propagate the TIOCPKT status. If the
 1571 * tty was previously stopped and is being started then the
1572 * driver start method is invoked and the line discipline woken.
1573 *
1574 * Locking:
1575 * Broken. Relies on BKL which is unsafe here.
1576 */
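
A rough sketch of how these two helpers are normally driven, loosely modelled on a line discipline reacting to XOFF/XON from the terminal; the helper name is illustrative only:

        #include <linux/tty.h>

        /* Hypothetical receive-path helper: honour software flow control */
        static void example_handle_flow_char(struct tty_struct *tty, unsigned char c)
        {
                if (c == STOP_CHAR(tty))
                        stop_tty(tty);          /* safe on an already stopped tty */
                else if (c == START_CHAR(tty))
                        start_tty(tty);         /* wakes writers if we were stopped */
        }
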
1577
1241void start_tty(struct tty_struct *tty) 1578void start_tty(struct tty_struct *tty)
1242{ 1579{
1243 if (!tty->stopped || tty->flow_stopped) 1580 if (!tty->stopped || tty->flow_stopped)
@@ -1258,6 +1595,23 @@ void start_tty(struct tty_struct *tty)
1258 1595
1259EXPORT_SYMBOL(start_tty); 1596EXPORT_SYMBOL(start_tty);
1260 1597
1598/**
1599 * tty_read - read method for tty device files
1600 * @file: pointer to tty file
1601 * @buf: user buffer
1602 * @count: size of user buffer
1603 * @ppos: unused
1604 *
1605 * Perform the read system call function on this terminal device. Checks
1606 * for hung up devices before calling the line discipline method.
1607 *
1608 * Locking:
1609 * Locks the line discipline internally while needed
1610 * For historical reasons the line discipline read method is
1611 * invoked under the BKL. This will go away in time so do not rely on it
1612 * in new code. Multiple read calls may be outstanding in parallel.
1613 */
1614
1261static ssize_t tty_read(struct file * file, char __user * buf, size_t count, 1615static ssize_t tty_read(struct file * file, char __user * buf, size_t count,
1262 loff_t *ppos) 1616 loff_t *ppos)
1263{ 1617{
@@ -1302,6 +1656,7 @@ static inline ssize_t do_tty_write(
1302 ssize_t ret = 0, written = 0; 1656 ssize_t ret = 0, written = 0;
1303 unsigned int chunk; 1657 unsigned int chunk;
1304 1658
1659 /* FIXME: O_NDELAY ... */
1305 if (mutex_lock_interruptible(&tty->atomic_write_lock)) { 1660 if (mutex_lock_interruptible(&tty->atomic_write_lock)) {
1306 return -ERESTARTSYS; 1661 return -ERESTARTSYS;
1307 } 1662 }
@@ -1318,6 +1673,9 @@ static inline ssize_t do_tty_write(
1318 * layer has problems with bigger chunks. It will 1673 * layer has problems with bigger chunks. It will
1319 * claim to be able to handle more characters than 1674 * claim to be able to handle more characters than
1320 * it actually does. 1675 * it actually does.
1676 *
1677 * FIXME: This can probably go away now except that 64K chunks
1678 * are too likely to fail unless switched to vmalloc...
1321 */ 1679 */
1322 chunk = 2048; 1680 chunk = 2048;
1323 if (test_bit(TTY_NO_WRITE_SPLIT, &tty->flags)) 1681 if (test_bit(TTY_NO_WRITE_SPLIT, &tty->flags))
@@ -1375,6 +1733,24 @@ static inline ssize_t do_tty_write(
1375} 1733}
1376 1734
1377 1735
1736/**
1737 * tty_write - write method for tty device file
1738 * @file: tty file pointer
1739 * @buf: user data to write
1740 * @count: bytes to write
1741 * @ppos: unused
1742 *
1743 * Write data to a tty device via the line discipline.
1744 *
1745 * Locking:
1746 * Locks the line discipline as required
1747 * Writes to the tty driver are serialized by the atomic_write_lock
1748 * and are then processed in chunks to the device. The line discipline
 1749 * write method will not be invoked in parallel for each device.
1750 * The line discipline write method is called under the big
1751 * kernel lock for historical reasons. New code should not rely on this.
1752 */
1753
1378static ssize_t tty_write(struct file * file, const char __user * buf, size_t count, 1754static ssize_t tty_write(struct file * file, const char __user * buf, size_t count,
1379 loff_t *ppos) 1755 loff_t *ppos)
1380{ 1756{
@@ -1422,7 +1798,18 @@ ssize_t redirected_tty_write(struct file * file, const char __user * buf, size_t
1422 1798
1423static char ptychar[] = "pqrstuvwxyzabcde"; 1799static char ptychar[] = "pqrstuvwxyzabcde";
1424 1800
1425static inline void pty_line_name(struct tty_driver *driver, int index, char *p) 1801/**
1802 * pty_line_name - generate name for a pty
1803 * @driver: the tty driver in use
1804 * @index: the minor number
1805 * @p: output buffer of at least 6 bytes
1806 *
1807 * Generate a name from a driver reference and write it to the output
1808 * buffer.
1809 *
1810 * Locking: None
1811 */
1812static void pty_line_name(struct tty_driver *driver, int index, char *p)
1426{ 1813{
1427 int i = index + driver->name_base; 1814 int i = index + driver->name_base;
1428 /* ->name is initialized to "ttyp", but "tty" is expected */ 1815 /* ->name is initialized to "ttyp", but "tty" is expected */
@@ -1431,24 +1818,53 @@ static inline void pty_line_name(struct tty_driver *driver, int index, char *p)
1431 ptychar[i >> 4 & 0xf], i & 0xf); 1818 ptychar[i >> 4 & 0xf], i & 0xf);
1432} 1819}
1433 1820
1434static inline void tty_line_name(struct tty_driver *driver, int index, char *p) 1821/**
 1822 * tty_line_name - generate name for a tty
1823 * @driver: the tty driver in use
1824 * @index: the minor number
1825 * @p: output buffer of at least 7 bytes
1826 *
1827 * Generate a name from a driver reference and write it to the output
1828 * buffer.
1829 *
1830 * Locking: None
1831 */
1832static void tty_line_name(struct tty_driver *driver, int index, char *p)
1435{ 1833{
1436 sprintf(p, "%s%d", driver->name, index + driver->name_base); 1834 sprintf(p, "%s%d", driver->name, index + driver->name_base);
1437} 1835}
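
The non-pty naming scheme shown just above is easy to see in isolation; a tiny user-space sketch (driver name "ttyS" and index 0 are merely examples):

        #include <stdio.h>

        int main(void)
        {
                char name[32];

                /* tty_line_name() style: "<driver name><index + name_base>" */
                snprintf(name, sizeof(name), "%s%d", "ttyS", 0);
                printf("%s\n", name);   /* prints "ttyS0" */
                return 0;
        }
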
1438 1836
1439/* 1837/**
1838 * init_dev - initialise a tty device
1839 * @driver: tty driver we are opening a device on
1840 * @idx: device index
1841 * @tty: returned tty structure
1842 *
1843 * Prepare a tty device. This may not be a "new" clean device but
1844 * could also be an active device. The pty drivers require special
1845 * handling because of this.
1846 *
1847 * Locking:
1848 * The function is called under the tty_mutex, which
1849 * protects us from the tty struct or driver itself going away.
1850 *
1851 * On exit the tty device has the line discipline attached and
1852 * a reference count of 1. If a pair was created for pty/tty use
1853 * and the other was a pty master then it too has a reference count of 1.
1854 *
1440 * WSH 06/09/97: Rewritten to remove races and properly clean up after a 1855 * WSH 06/09/97: Rewritten to remove races and properly clean up after a
1441 * failed open. The new code protects the open with a mutex, so it's 1856 * failed open. The new code protects the open with a mutex, so it's
1442 * really quite straightforward. The mutex locking can probably be 1857 * really quite straightforward. The mutex locking can probably be
1443 * relaxed for the (most common) case of reopening a tty. 1858 * relaxed for the (most common) case of reopening a tty.
1444 */ 1859 */
1860
1445static int init_dev(struct tty_driver *driver, int idx, 1861static int init_dev(struct tty_driver *driver, int idx,
1446 struct tty_struct **ret_tty) 1862 struct tty_struct **ret_tty)
1447{ 1863{
1448 struct tty_struct *tty, *o_tty; 1864 struct tty_struct *tty, *o_tty;
1449 struct termios *tp, **tp_loc, *o_tp, **o_tp_loc; 1865 struct termios *tp, **tp_loc, *o_tp, **o_tp_loc;
1450 struct termios *ltp, **ltp_loc, *o_ltp, **o_ltp_loc; 1866 struct termios *ltp, **ltp_loc, *o_ltp, **o_ltp_loc;
1451 int retval=0; 1867 int retval = 0;
1452 1868
1453 /* check whether we're reopening an existing tty */ 1869 /* check whether we're reopening an existing tty */
1454 if (driver->flags & TTY_DRIVER_DEVPTS_MEM) { 1870 if (driver->flags & TTY_DRIVER_DEVPTS_MEM) {
@@ -1662,10 +2078,20 @@ release_mem_out:
1662 goto end_init; 2078 goto end_init;
1663} 2079}
1664 2080
1665/* 2081/**
1666 * Releases memory associated with a tty structure, and clears out the 2082 * release_mem - release tty structure memory
1667 * driver table slots. 2083 *
2084 * Releases memory associated with a tty structure, and clears out the
2085 * driver table slots. This function is called when a device is no longer
2086 * in use. It also gets called when setup of a device fails.
2087 *
2088 * Locking:
2089 * tty_mutex - sometimes only
2090 * takes the file list lock internally when working on the list
2091 * of ttys that the driver keeps.
2092 * FIXME: should we require tty_mutex is held here ??
1668 */ 2093 */
2094
1669static void release_mem(struct tty_struct *tty, int idx) 2095static void release_mem(struct tty_struct *tty, int idx)
1670{ 2096{
1671 struct tty_struct *o_tty; 2097 struct tty_struct *o_tty;
@@ -2006,18 +2432,27 @@ static void release_dev(struct file * filp)
2006 2432
2007} 2433}
2008 2434
2009/* 2435/**
2010 * tty_open and tty_release keep up the tty count that contains the 2436 * tty_open - open a tty device
2011 * number of opens done on a tty. We cannot use the inode-count, as 2437 * @inode: inode of device file
2012 * different inodes might point to the same tty. 2438 * @filp: file pointer to tty
2439 *
2440 * tty_open and tty_release keep up the tty count that contains the
2441 * number of opens done on a tty. We cannot use the inode-count, as
2442 * different inodes might point to the same tty.
2013 * 2443 *
2014 * Open-counting is needed for pty masters, as well as for keeping 2444 * Open-counting is needed for pty masters, as well as for keeping
2015 * track of serial lines: DTR is dropped when the last close happens. 2445 * track of serial lines: DTR is dropped when the last close happens.
2016 * (This is not done solely through tty->count, now. - Ted 1/27/92) 2446 * (This is not done solely through tty->count, now. - Ted 1/27/92)
2017 * 2447 *
2018 * The termios state of a pty is reset on first open so that 2448 * The termios state of a pty is reset on first open so that
2019 * settings don't persist across reuse. 2449 * settings don't persist across reuse.
2450 *
2451 * Locking: tty_mutex protects current->signal->tty, get_tty_driver and
2452 * init_dev work. tty->count should protect the rest.
2453 * task_lock is held to update task details for sessions
2020 */ 2454 */
2455
2021static int tty_open(struct inode * inode, struct file * filp) 2456static int tty_open(struct inode * inode, struct file * filp)
2022{ 2457{
2023 struct tty_struct *tty; 2458 struct tty_struct *tty;
@@ -2132,6 +2567,18 @@ got_driver:
2132} 2567}
2133 2568
2134#ifdef CONFIG_UNIX98_PTYS 2569#ifdef CONFIG_UNIX98_PTYS
2570/**
2571 * ptmx_open - open a unix 98 pty master
2572 * @inode: inode of device file
2573 * @filp: file pointer to tty
2574 *
2575 * Allocate a unix98 pty master device from the ptmx driver.
2576 *
 2577 * Locking: tty_mutex protects the init_dev work. tty->count should
 2578 * protect the rest.
2579 * allocated_ptys_lock handles the list of free pty numbers
2580 */
2581
2135static int ptmx_open(struct inode * inode, struct file * filp) 2582static int ptmx_open(struct inode * inode, struct file * filp)
2136{ 2583{
2137 struct tty_struct *tty; 2584 struct tty_struct *tty;
@@ -2191,6 +2638,18 @@ out:
2191} 2638}
2192#endif 2639#endif
2193 2640
2641/**
2642 * tty_release - vfs callback for close
2643 * @inode: inode of tty
2644 * @filp: file pointer for handle to tty
2645 *
2646 * Called the last time each file handle is closed that references
2647 * this tty. There may however be several such references.
2648 *
2649 * Locking:
2650 * Takes bkl. See release_dev
2651 */
2652
2194static int tty_release(struct inode * inode, struct file * filp) 2653static int tty_release(struct inode * inode, struct file * filp)
2195{ 2654{
2196 lock_kernel(); 2655 lock_kernel();
@@ -2199,7 +2658,18 @@ static int tty_release(struct inode * inode, struct file * filp)
2199 return 0; 2658 return 0;
2200} 2659}
2201 2660
2202/* No kernel lock held - fine */ 2661/**
2662 * tty_poll - check tty status
2663 * @filp: file being polled
2664 * @wait: poll wait structures to update
2665 *
2666 * Call the line discipline polling method to obtain the poll
2667 * status of the device.
2668 *
2669 * Locking: locks called line discipline but ldisc poll method
2670 * may be re-entered freely by other callers.
2671 */
2672
2203static unsigned int tty_poll(struct file * filp, poll_table * wait) 2673static unsigned int tty_poll(struct file * filp, poll_table * wait)
2204{ 2674{
2205 struct tty_struct * tty; 2675 struct tty_struct * tty;
@@ -2243,6 +2713,21 @@ static int tty_fasync(int fd, struct file * filp, int on)
2243 return 0; 2713 return 0;
2244} 2714}
2245 2715
2716/**
2717 * tiocsti - fake input character
2718 * @tty: tty to fake input into
2719 * @p: pointer to character
2720 *
 2721 * Fake input to a tty device. Does the necessary locking and
2722 * input management.
2723 *
2724 * FIXME: does not honour flow control ??
2725 *
2726 * Locking:
2727 * Called functions take tty_ldisc_lock
2728 * current->signal->tty check is safe without locks
2729 */
2730
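
From user space the ioctl documented above looks like this; a minimal sketch that pushes one character into the current terminal's input queue (error handling trimmed):

        #include <stdio.h>
        #include <sys/ioctl.h>
        #include <unistd.h>

        int main(void)
        {
                char c = '!';

                /* Must be our controlling tty (or have CAP_SYS_ADMIN) */
                if (ioctl(STDIN_FILENO, TIOCSTI, &c) < 0)
                        perror("TIOCSTI");
                return 0;
        }
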
2246static int tiocsti(struct tty_struct *tty, char __user *p) 2731static int tiocsti(struct tty_struct *tty, char __user *p)
2247{ 2732{
2248 char ch, mbz = 0; 2733 char ch, mbz = 0;
@@ -2258,6 +2743,18 @@ static int tiocsti(struct tty_struct *tty, char __user *p)
2258 return 0; 2743 return 0;
2259} 2744}
2260 2745
2746/**
2747 * tiocgwinsz - implement window query ioctl
 2748 * @tty: tty
2749 * @arg: user buffer for result
2750 *
2751 * Copies the kernel idea of the window size into the user buffer. No
2752 * locking is done.
2753 *
 2754 * FIXME: returning values that race a window size set is wrong;
 2755 * we should lock here against that
2756 */
2757
2261static int tiocgwinsz(struct tty_struct *tty, struct winsize __user * arg) 2758static int tiocgwinsz(struct tty_struct *tty, struct winsize __user * arg)
2262{ 2759{
2263 if (copy_to_user(arg, &tty->winsize, sizeof(*arg))) 2760 if (copy_to_user(arg, &tty->winsize, sizeof(*arg)))
@@ -2265,6 +2762,24 @@ static int tiocgwinsz(struct tty_struct *tty, struct winsize __user * arg)
2265 return 0; 2762 return 0;
2266} 2763}
2267 2764
2765/**
2766 * tiocswinsz - implement window size set ioctl
2767 * @tty; tty
2768 * @arg: user buffer for result
2769 *
2770 * Copies the user idea of the window size to the kernel. Traditionally
2771 * this is just advisory information but for the Linux console it
2772 * actually has driver level meaning and triggers a VC resize.
2773 *
2774 * Locking:
2775 * The console_sem is used to ensure we do not try and resize
2776 * the console twice at once.
2777 * FIXME: Two racing size sets may leave the console and kernel
 2778 * parameters disagreeing. Is this exploitable?
 2779 * FIXME: a window size get racing this set may see inconsistent values;
 2780 * we should lock here against that
2781 */
2782
2268static int tiocswinsz(struct tty_struct *tty, struct tty_struct *real_tty, 2783static int tiocswinsz(struct tty_struct *tty, struct tty_struct *real_tty,
2269 struct winsize __user * arg) 2784 struct winsize __user * arg)
2270{ 2785{
@@ -2294,6 +2809,15 @@ static int tiocswinsz(struct tty_struct *tty, struct tty_struct *real_tty,
2294 return 0; 2809 return 0;
2295} 2810}
2296 2811
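
A user-space sketch of the two window-size ioctls described above, reading the current size and writing the same value straight back:

        #include <stdio.h>
        #include <sys/ioctl.h>
        #include <unistd.h>

        int main(void)
        {
                struct winsize ws;

                if (ioctl(STDIN_FILENO, TIOCGWINSZ, &ws) == 0) {
                        printf("%hu rows x %hu cols\n", ws.ws_row, ws.ws_col);
                        /* Writing the same value back is effectively a no-op resize */
                        ioctl(STDIN_FILENO, TIOCSWINSZ, &ws);
                }
                return 0;
        }
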
2812/**
2813 * tioccons - allow admin to move logical console
2814 * @file: the file to become console
2815 *
 2816 * Allow the administrator to move the redirected console device
2817 *
2818 * Locking: uses redirect_lock to guard the redirect information
2819 */
2820
2297static int tioccons(struct file *file) 2821static int tioccons(struct file *file)
2298{ 2822{
2299 if (!capable(CAP_SYS_ADMIN)) 2823 if (!capable(CAP_SYS_ADMIN))
@@ -2319,6 +2843,17 @@ static int tioccons(struct file *file)
2319 return 0; 2843 return 0;
2320} 2844}
2321 2845
2846/**
2847 * fionbio - non blocking ioctl
2848 * @file: file to set blocking value
2849 * @p: user parameter
2850 *
2851 * Historical tty interfaces had a blocking control ioctl before
2852 * the generic functionality existed. This piece of history is preserved
 2853 * in the expected tty API of POSIX systems.
2854 *
 2855 * Locking: none, the open file handle ensures it won't go away.
2856 */
2322 2857
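
A user-space sketch of the historical interface described above; FIONBIO toggles the same state that fcntl() reaches via O_NONBLOCK:

        #include <stdio.h>
        #include <sys/ioctl.h>
        #include <unistd.h>

        int main(void)
        {
                int on = 1;

                /* Equivalent to fcntl(fd, F_SETFL, flags | O_NONBLOCK) */
                if (ioctl(STDIN_FILENO, FIONBIO, &on) < 0)
                        perror("FIONBIO");
                return 0;
        }
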
2323static int fionbio(struct file *file, int __user *p) 2858static int fionbio(struct file *file, int __user *p)
2324{ 2859{
@@ -2334,6 +2869,23 @@ static int fionbio(struct file *file, int __user *p)
2334 return 0; 2869 return 0;
2335} 2870}
2336 2871
2872/**
2873 * tiocsctty - set controlling tty
2874 * @tty: tty structure
2875 * @arg: user argument
2876 *
2877 * This ioctl is used to manage job control. It permits a session
2878 * leader to set this tty as the controlling tty for the session.
2879 *
2880 * Locking:
2881 * Takes tasklist lock internally to walk sessions
2882 * Takes task_lock() when updating signal->tty
2883 *
2884 * FIXME: tty_mutex is needed to protect signal->tty references.
2885 * FIXME: why task_lock on the signal->tty reference ??
2886 *
2887 */
2888
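
A user-space sketch of the session-leader sequence this ioctl serves: become a session leader with setsid(), then claim a terminal as the controlling tty. The device path is illustrative only:

        #include <fcntl.h>
        #include <stdio.h>
        #include <sys/ioctl.h>
        #include <unistd.h>

        int main(void)
        {
                int fd;

                if (setsid() < 0)               /* fails if already a pgrp leader */
                        perror("setsid");
                fd = open("/dev/ttyS0", O_RDWR);        /* illustrative device */
                if (fd >= 0 && ioctl(fd, TIOCSCTTY, 0) < 0)
                        perror("TIOCSCTTY");
                return 0;
        }
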
2337static int tiocsctty(struct tty_struct *tty, int arg) 2889static int tiocsctty(struct tty_struct *tty, int arg)
2338{ 2890{
2339 struct task_struct *p; 2891 struct task_struct *p;
@@ -2374,6 +2926,18 @@ static int tiocsctty(struct tty_struct *tty, int arg)
2374 return 0; 2926 return 0;
2375} 2927}
2376 2928
2929/**
2930 * tiocgpgrp - get process group
2931 * @tty: tty passed by user
 2932 * @real_tty: tty side of the tty passed by the user if a pty else the tty
2933 * @p: returned pid
2934 *
2935 * Obtain the process group of the tty. If there is no process group
2936 * return an error.
2937 *
2938 * Locking: none. Reference to ->signal->tty is safe.
2939 */
2940
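
A user-space sketch of the get/set pair these helpers implement; tcgetpgrp()/tcsetpgrp() are the usual POSIX wrappers around the same ioctls:

        #include <stdio.h>
        #include <sys/ioctl.h>
        #include <unistd.h>

        int main(void)
        {
                pid_t pgrp;

                if (ioctl(STDIN_FILENO, TIOCGPGRP, &pgrp) == 0) {
                        printf("foreground pgrp: %d\n", (int)pgrp);
                        /* Handing the terminal back to the same group is a no-op */
                        ioctl(STDIN_FILENO, TIOCSPGRP, &pgrp);
                }
                return 0;
        }
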
2377static int tiocgpgrp(struct tty_struct *tty, struct tty_struct *real_tty, pid_t __user *p) 2941static int tiocgpgrp(struct tty_struct *tty, struct tty_struct *real_tty, pid_t __user *p)
2378{ 2942{
2379 /* 2943 /*
@@ -2385,6 +2949,20 @@ static int tiocgpgrp(struct tty_struct *tty, struct tty_struct *real_tty, pid_t
2385 return put_user(real_tty->pgrp, p); 2949 return put_user(real_tty->pgrp, p);
2386} 2950}
2387 2951
2952/**
2953 * tiocspgrp - attempt to set process group
2954 * @tty: tty passed by user
2955 * @real_tty: tty side device matching tty passed by user
2956 * @p: pid pointer
2957 *
2958 * Set the process group of the tty to the session passed. Only
2959 * permitted where the tty session is our session.
2960 *
2961 * Locking: None
2962 *
2963 * FIXME: current->signal->tty referencing is unsafe.
2964 */
2965
2388static int tiocspgrp(struct tty_struct *tty, struct tty_struct *real_tty, pid_t __user *p) 2966static int tiocspgrp(struct tty_struct *tty, struct tty_struct *real_tty, pid_t __user *p)
2389{ 2967{
2390 pid_t pgrp; 2968 pid_t pgrp;
@@ -2408,6 +2986,18 @@ static int tiocspgrp(struct tty_struct *tty, struct tty_struct *real_tty, pid_t
2408 return 0; 2986 return 0;
2409} 2987}
2410 2988
2989/**
2990 * tiocgsid - get session id
2991 * @tty: tty passed by user
 2992 * @real_tty: tty side of the tty passed by the user if a pty else the tty
2993 * @p: pointer to returned session id
2994 *
2995 * Obtain the session id of the tty. If there is no session
2996 * return an error.
2997 *
2998 * Locking: none. Reference to ->signal->tty is safe.
2999 */
3000
2411static int tiocgsid(struct tty_struct *tty, struct tty_struct *real_tty, pid_t __user *p) 3001static int tiocgsid(struct tty_struct *tty, struct tty_struct *real_tty, pid_t __user *p)
2412{ 3002{
2413 /* 3003 /*
@@ -2421,6 +3011,16 @@ static int tiocgsid(struct tty_struct *tty, struct tty_struct *real_tty, pid_t _
2421 return put_user(real_tty->session, p); 3011 return put_user(real_tty->session, p);
2422} 3012}
2423 3013
3014/**
3015 * tiocsetd - set line discipline
3016 * @tty: tty device
3017 * @p: pointer to user data
3018 *
3019 * Set the line discipline according to user request.
3020 *
3021 * Locking: see tty_set_ldisc, this function is just a helper
3022 */
3023
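
A user-space sketch of the line-discipline switch this helper backs; the value 0 is N_TTY, the default discipline (hard-coded here to keep the example header-independent):

        #include <stdio.h>
        #include <sys/ioctl.h>
        #include <unistd.h>

        int main(void)
        {
                int ldisc = 0;  /* 0 == N_TTY, the default line discipline */

                if (ioctl(STDIN_FILENO, TIOCSETD, &ldisc) < 0)
                        perror("TIOCSETD");
                return 0;
        }
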
2424static int tiocsetd(struct tty_struct *tty, int __user *p) 3024static int tiocsetd(struct tty_struct *tty, int __user *p)
2425{ 3025{
2426 int ldisc; 3026 int ldisc;
@@ -2430,6 +3030,21 @@ static int tiocsetd(struct tty_struct *tty, int __user *p)
2430 return tty_set_ldisc(tty, ldisc); 3030 return tty_set_ldisc(tty, ldisc);
2431} 3031}
2432 3032
3033/**
 3034 * send_break - perform a timed break
3035 * @tty: device to break on
3036 * @duration: timeout in mS
3037 *
3038 * Perform a timed break on hardware that lacks its own driver level
3039 * timed break functionality.
3040 *
3041 * Locking:
3042 * None
3043 *
3044 * FIXME:
 3045 * What if two calls overlap?
3046 */
3047
2433static int send_break(struct tty_struct *tty, unsigned int duration) 3048static int send_break(struct tty_struct *tty, unsigned int duration)
2434{ 3049{
2435 tty->driver->break_ctl(tty, -1); 3050 tty->driver->break_ctl(tty, -1);
@@ -2442,8 +3057,19 @@ static int send_break(struct tty_struct *tty, unsigned int duration)
2442 return 0; 3057 return 0;
2443} 3058}
2444 3059
2445static int 3060/**
2446tty_tiocmget(struct tty_struct *tty, struct file *file, int __user *p) 3061 * tiocmget - get modem status
3062 * @tty: tty device
3063 * @file: user file pointer
3064 * @p: pointer to result
3065 *
3066 * Obtain the modem status bits from the tty driver if the feature
3067 * is supported. Return -EINVAL if it is not available.
3068 *
3069 * Locking: none (up to the driver)
3070 */
3071
3072static int tty_tiocmget(struct tty_struct *tty, struct file *file, int __user *p)
2447{ 3073{
2448 int retval = -EINVAL; 3074 int retval = -EINVAL;
2449 3075
@@ -2456,8 +3082,20 @@ tty_tiocmget(struct tty_struct *tty, struct file *file, int __user *p)
2456 return retval; 3082 return retval;
2457} 3083}
2458 3084
2459static int 3085/**
2460tty_tiocmset(struct tty_struct *tty, struct file *file, unsigned int cmd, 3086 * tiocmset - set modem status
3087 * @tty: tty device
3088 * @file: user file pointer
3089 * @cmd: command - clear bits, set bits or set all
3090 * @p: pointer to desired bits
3091 *
3092 * Set the modem status bits from the tty driver if the feature
3093 * is supported. Return -EINVAL if it is not available.
3094 *
3095 * Locking: none (up to the driver)
3096 */
3097
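
A user-space sketch of the modem-control interface these two helpers back: read the current status bits, then assert DTR and RTS on a serial device (the path is illustrative, error handling trimmed):

        #include <fcntl.h>
        #include <stdio.h>
        #include <sys/ioctl.h>
        #include <unistd.h>

        int main(void)
        {
                int fd = open("/dev/ttyS0", O_RDWR | O_NOCTTY); /* illustrative */
                int bits, set = TIOCM_DTR | TIOCM_RTS;

                if (fd >= 0 && ioctl(fd, TIOCMGET, &bits) == 0) {
                        printf("CD %s\n", (bits & TIOCM_CD) ? "up" : "down");
                        ioctl(fd, TIOCMBIS, &set);      /* assert DTR and RTS */
                }
                return 0;
        }
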
3098static int tty_tiocmset(struct tty_struct *tty, struct file *file, unsigned int cmd,
2461 unsigned __user *p) 3099 unsigned __user *p)
2462{ 3100{
2463 int retval = -EINVAL; 3101 int retval = -EINVAL;
@@ -2573,6 +3211,7 @@ int tty_ioctl(struct inode * inode, struct file * file,
2573 clear_bit(TTY_EXCLUSIVE, &tty->flags); 3211 clear_bit(TTY_EXCLUSIVE, &tty->flags);
2574 return 0; 3212 return 0;
2575 case TIOCNOTTY: 3213 case TIOCNOTTY:
 3214 /* FIXME: task lock or tty_mutex ? */
2576 if (current->signal->tty != tty) 3215 if (current->signal->tty != tty)
2577 return -ENOTTY; 3216 return -ENOTTY;
2578 if (current->signal->leader) 3217 if (current->signal->leader)
@@ -2753,9 +3392,16 @@ void do_SAK(struct tty_struct *tty)
2753 3392
2754EXPORT_SYMBOL(do_SAK); 3393EXPORT_SYMBOL(do_SAK);
2755 3394
2756/* 3395/**
2757 * This routine is called out of the software interrupt to flush data 3396 * flush_to_ldisc
2758 * from the buffer chain to the line discipline. 3397 * @private_: tty structure passed from work queue.
3398 *
3399 * This routine is called out of the software interrupt to flush data
3400 * from the buffer chain to the line discipline.
3401 *
3402 * Locking: holds tty->buf.lock to guard buffer list. Drops the lock
3403 * while invoking the line discipline receive_buf method. The
3404 * receive_buf method is single threaded for each tty instance.
2759 */ 3405 */
2760 3406
2761static void flush_to_ldisc(void *private_) 3407static void flush_to_ldisc(void *private_)
@@ -2831,6 +3477,8 @@ static int n_baud_table = ARRAY_SIZE(baud_table);
2831 * Convert termios baud rate data into a speed. This should be called 3477 * Convert termios baud rate data into a speed. This should be called
2832 * with the termios lock held if this termios is a terminal termios 3478 * with the termios lock held if this termios is a terminal termios
2833 * structure. May change the termios data. 3479 * structure. May change the termios data.
3480 *
3481 * Locking: none
2834 */ 3482 */
2835 3483
2836int tty_termios_baud_rate(struct termios *termios) 3484int tty_termios_baud_rate(struct termios *termios)
@@ -2859,6 +3507,8 @@ EXPORT_SYMBOL(tty_termios_baud_rate);
2859 * Returns the baud rate as an integer for this terminal. The 3507 * Returns the baud rate as an integer for this terminal. The
2860 * termios lock must be held by the caller and the terminal bit 3508 * termios lock must be held by the caller and the terminal bit
2861 * flags may be updated. 3509 * flags may be updated.
3510 *
3511 * Locking: none
2862 */ 3512 */
2863 3513
2864int tty_get_baud_rate(struct tty_struct *tty) 3514int tty_get_baud_rate(struct tty_struct *tty)
@@ -2888,6 +3538,8 @@ EXPORT_SYMBOL(tty_get_baud_rate);
2888 * 3538 *
2889 * In the event of the queue being busy for flipping the work will be 3539 * In the event of the queue being busy for flipping the work will be
2890 * held off and retried later. 3540 * held off and retried later.
3541 *
3542 * Locking: tty buffer lock. Driver locks in low latency mode.
2891 */ 3543 */
2892 3544
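
A kernel-side sketch of the usual producer of this work: a driver receive path stuffing bytes into the flip buffer and then pushing them towards the line discipline. The port structure and its use are hypothetical:

        #include <linux/tty.h>
        #include <linux/tty_flip.h>

        struct example_port {
                struct tty_struct *tty;
        };

        /* Hypothetical RX interrupt body */
        static void example_rx_chars(struct example_port *port,
                                     const unsigned char *buf, int count)
        {
                int i;

                for (i = 0; i < count; i++)
                        tty_insert_flip_char(port->tty, buf[i], TTY_NORMAL);
                tty_flip_buffer_push(port->tty);        /* schedules flush_to_ldisc */
        }
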
2893void tty_flip_buffer_push(struct tty_struct *tty) 3545void tty_flip_buffer_push(struct tty_struct *tty)
@@ -2907,9 +3559,16 @@ void tty_flip_buffer_push(struct tty_struct *tty)
2907EXPORT_SYMBOL(tty_flip_buffer_push); 3559EXPORT_SYMBOL(tty_flip_buffer_push);
2908 3560
2909 3561
2910/* 3562/**
2911 * This subroutine initializes a tty structure. 3563 * initialize_tty_struct
3564 * @tty: tty to initialize
3565 *
3566 * This subroutine initializes a tty structure that has been newly
3567 * allocated.
3568 *
3569 * Locking: none - tty in question must not be exposed at this point
2912 */ 3570 */
3571
2913static void initialize_tty_struct(struct tty_struct *tty) 3572static void initialize_tty_struct(struct tty_struct *tty)
2914{ 3573{
2915 memset(tty, 0, sizeof(struct tty_struct)); 3574 memset(tty, 0, sizeof(struct tty_struct));
@@ -2935,6 +3594,7 @@ static void initialize_tty_struct(struct tty_struct *tty)
2935/* 3594/*
2936 * The default put_char routine if the driver did not define one. 3595 * The default put_char routine if the driver did not define one.
2937 */ 3596 */
3597
2938static void tty_default_put_char(struct tty_struct *tty, unsigned char ch) 3598static void tty_default_put_char(struct tty_struct *tty, unsigned char ch)
2939{ 3599{
2940 tty->driver->write(tty, &ch, 1); 3600 tty->driver->write(tty, &ch, 1);
@@ -2943,19 +3603,23 @@ static void tty_default_put_char(struct tty_struct *tty, unsigned char ch)
2943static struct class *tty_class; 3603static struct class *tty_class;
2944 3604
2945/** 3605/**
2946 * tty_register_device - register a tty device 3606 * tty_register_device - register a tty device
2947 * @driver: the tty driver that describes the tty device 3607 * @driver: the tty driver that describes the tty device
2948 * @index: the index in the tty driver for this tty device 3608 * @index: the index in the tty driver for this tty device
2949 * @device: a struct device that is associated with this tty device. 3609 * @device: a struct device that is associated with this tty device.
2950 * This field is optional, if there is no known struct device for this 3610 * This field is optional, if there is no known struct device
2951 * tty device it can be set to NULL safely. 3611 * for this tty device it can be set to NULL safely.
2952 * 3612 *
2953 * Returns a pointer to the class device (or ERR_PTR(-EFOO) on error). 3613 * Returns a pointer to the class device (or ERR_PTR(-EFOO) on error).
2954 * 3614 *
2955 * This call is required to be made to register an individual tty device if 3615 * This call is required to be made to register an individual tty device
2956 * the tty driver's flags have the TTY_DRIVER_DYNAMIC_DEV bit set. If that 3616 * if the tty driver's flags have the TTY_DRIVER_DYNAMIC_DEV bit set. If
2957 * bit is not set, this function should not be called by a tty driver. 3617 * that bit is not set, this function should not be called by a tty
3618 * driver.
3619 *
3620 * Locking: ??
2958 */ 3621 */
3622
2959struct class_device *tty_register_device(struct tty_driver *driver, 3623struct class_device *tty_register_device(struct tty_driver *driver,
2960 unsigned index, struct device *device) 3624 unsigned index, struct device *device)
2961{ 3625{
@@ -2977,13 +3641,16 @@ struct class_device *tty_register_device(struct tty_driver *driver,
2977} 3641}
2978 3642
2979/** 3643/**
2980 * tty_unregister_device - unregister a tty device 3644 * tty_unregister_device - unregister a tty device
2981 * @driver: the tty driver that describes the tty device 3645 * @driver: the tty driver that describes the tty device
2982 * @index: the index in the tty driver for this tty device 3646 * @index: the index in the tty driver for this tty device
2983 * 3647 *
2984 * If a tty device is registered with a call to tty_register_device() then 3648 * If a tty device is registered with a call to tty_register_device() then
2985 * this function must be made when the tty device is gone. 3649 * this function must be called when the tty device is gone.
3650 *
3651 * Locking: ??
2986 */ 3652 */
3653
2987void tty_unregister_device(struct tty_driver *driver, unsigned index) 3654void tty_unregister_device(struct tty_driver *driver, unsigned index)
2988{ 3655{
2989 class_device_destroy(tty_class, MKDEV(driver->major, driver->minor_start) + index); 3656 class_device_destroy(tty_class, MKDEV(driver->major, driver->minor_start) + index);
@@ -3094,7 +3761,6 @@ int tty_register_driver(struct tty_driver *driver)
3094 driver->cdev.owner = driver->owner; 3761 driver->cdev.owner = driver->owner;
3095 error = cdev_add(&driver->cdev, dev, driver->num); 3762 error = cdev_add(&driver->cdev, dev, driver->num);
3096 if (error) { 3763 if (error) {
3097 cdev_del(&driver->cdev);
3098 unregister_chrdev_region(dev, driver->num); 3764 unregister_chrdev_region(dev, driver->num);
3099 driver->ttys = NULL; 3765 driver->ttys = NULL;
3100 driver->termios = driver->termios_locked = NULL; 3766 driver->termios = driver->termios_locked = NULL;
diff --git a/drivers/char/tty_ioctl.c b/drivers/char/tty_ioctl.c
index f19cf9d7792..4ad47d321bd 100644
--- a/drivers/char/tty_ioctl.c
+++ b/drivers/char/tty_ioctl.c
@@ -36,6 +36,18 @@
36#define TERMIOS_WAIT 2 36#define TERMIOS_WAIT 2
37#define TERMIOS_TERMIO 4 37#define TERMIOS_TERMIO 4
38 38
39
40/**
41 * tty_wait_until_sent - wait for I/O to finish
42 * @tty: tty we are waiting for
43 * @timeout: how long we will wait
44 *
45 * Wait for characters pending in a tty driver to hit the wire, or
 46 * for a timeout to occur (e.g. due to flow control).
47 *
48 * Locking: none
49 */
50
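
A kernel-side sketch of the typical caller: a driver close method draining its transmitter before dropping the line. The helper name and the 30-second timeout are hypothetical:

        #include <linux/tty.h>
        #include <linux/jiffies.h>

        /* Hypothetical driver close path */
        static void example_close(struct tty_struct *tty)
        {
                /* Wait up to 30s for queued output to reach the wire */
                tty_wait_until_sent(tty, 30 * HZ);
                /* ... now drop DTR / disable the transmitter ... */
        }
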
39void tty_wait_until_sent(struct tty_struct * tty, long timeout) 51void tty_wait_until_sent(struct tty_struct * tty, long timeout)
40{ 52{
41 DECLARE_WAITQUEUE(wait, current); 53 DECLARE_WAITQUEUE(wait, current);
@@ -94,6 +106,18 @@ static void unset_locked_termios(struct termios *termios,
94 old->c_cc[i] : termios->c_cc[i]; 106 old->c_cc[i] : termios->c_cc[i];
95} 107}
96 108
109/**
110 * change_termios - update termios values
111 * @tty: tty to update
112 * @new_termios: desired new value
113 *
114 * Perform updates to the termios values set on this terminal. There
115 * is a bit of layering violation here with n_tty in terms of the
116 * internal knowledge of this function.
117 *
118 * Locking: termios_sem
119 */
120
97static void change_termios(struct tty_struct * tty, struct termios * new_termios) 121static void change_termios(struct tty_struct * tty, struct termios * new_termios)
98{ 122{
99 int canon_change; 123 int canon_change;
@@ -155,6 +179,19 @@ static void change_termios(struct tty_struct * tty, struct termios * new_termios
155 up(&tty->termios_sem); 179 up(&tty->termios_sem);
156} 180}
157 181
182/**
183 * set_termios - set termios values for a tty
184 * @tty: terminal device
185 * @arg: user data
186 * @opt: option information
187 *
 188 * Helper function to prepare termios data and run necessary other
189 * functions before using change_termios to do the actual changes.
190 *
191 * Locking:
192 * Called functions take ldisc and termios_sem locks
193 */
194
158static int set_termios(struct tty_struct * tty, void __user *arg, int opt) 195static int set_termios(struct tty_struct * tty, void __user *arg, int opt)
159{ 196{
160 struct termios tmp_termios; 197 struct termios tmp_termios;
@@ -284,6 +321,17 @@ static void set_sgflags(struct termios * termios, int flags)
284 } 321 }
285} 322}
286 323
324/**
325 * set_sgttyb - set legacy terminal values
326 * @tty: tty structure
327 * @sgttyb: pointer to old style terminal structure
328 *
329 * Updates a terminal from the legacy BSD style terminal information
330 * structure.
331 *
332 * Locking: termios_sem
333 */
334
287static int set_sgttyb(struct tty_struct * tty, struct sgttyb __user * sgttyb) 335static int set_sgttyb(struct tty_struct * tty, struct sgttyb __user * sgttyb)
288{ 336{
289 int retval; 337 int retval;
@@ -369,9 +417,16 @@ static int set_ltchars(struct tty_struct * tty, struct ltchars __user * ltchars)
369} 417}
370#endif 418#endif
371 419
372/* 420/**
373 * Send a high priority character to the tty. 421 * send_prio_char - send priority character
422 *
423 * Send a high priority character to the tty even if stopped
424 *
425 * Locking: none
426 *
427 * FIXME: overlapping calls with start/stop tty lose state of tty
374 */ 428 */
429
375static void send_prio_char(struct tty_struct *tty, char ch) 430static void send_prio_char(struct tty_struct *tty, char ch)
376{ 431{
377 int was_stopped = tty->stopped; 432 int was_stopped = tty->stopped;
diff --git a/drivers/char/vt_ioctl.c b/drivers/char/vt_ioctl.c
index eccffaf26fa..a5628a8b662 100644
--- a/drivers/char/vt_ioctl.c
+++ b/drivers/char/vt_ioctl.c
@@ -1011,6 +1011,8 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
1011 return -EPERM; 1011 return -EPERM;
1012 vt_dont_switch = 0; 1012 vt_dont_switch = 0;
1013 return 0; 1013 return 0;
1014 case VT_GETHIFONTMASK:
1015 return put_user(vc->vc_hi_font_mask, (unsigned short __user *)arg);
1014 default: 1016 default:
1015 return -ENOIOCTLCMD; 1017 return -ENOIOCTLCMD;
1016 } 1018 }
diff --git a/drivers/char/watchdog/Kconfig b/drivers/char/watchdog/Kconfig
index d53f664a4dd..fff89c2d88f 100644
--- a/drivers/char/watchdog/Kconfig
+++ b/drivers/char/watchdog/Kconfig
@@ -45,7 +45,7 @@ config WATCHDOG_NOWAYOUT
45comment "Watchdog Device Drivers" 45comment "Watchdog Device Drivers"
46 depends on WATCHDOG 46 depends on WATCHDOG
47 47
48# Architecture Independant 48# Architecture Independent
49 49
50config SOFT_WATCHDOG 50config SOFT_WATCHDOG
51 tristate "Software watchdog" 51 tristate "Software watchdog"
@@ -127,7 +127,7 @@ config S3C2410_WATCHDOG
127 enabled. 127 enabled.
128 128
129 The driver is limited by the speed of the system's PCLK 129 The driver is limited by the speed of the system's PCLK
130 signal, so with reasonbaly fast systems (PCLK around 50-66MHz) 130 signal, so with reasonably fast systems (PCLK around 50-66MHz)
131 then watchdog intervals of over approximately 20seconds are 131 then watchdog intervals of over approximately 20seconds are
132 unavailable. 132 unavailable.
133 133
@@ -423,7 +423,7 @@ config SBC_EPX_C3_WATCHDOG
423 is no way to know if writing to its IO address will corrupt 423 is no way to know if writing to its IO address will corrupt
424 your system or have any real effect. The only way to be sure 424 your system or have any real effect. The only way to be sure
425 that this driver does what you want is to make sure you 425 that this driver does what you want is to make sure you
426 are runnning it on an EPX-C3 from Winsystems with the watchdog 426 are running it on an EPX-C3 from Winsystems with the watchdog
427 timer at IO address 0x1ee and 0x1ef. It will write to both those 427 timer at IO address 0x1ee and 0x1ef. It will write to both those
428 IO ports. Basically, the assumption is made that if you compile 428 IO ports. Basically, the assumption is made that if you compile
429 this driver into your kernel and/or load it as a module, that you 429 this driver into your kernel and/or load it as a module, that you
@@ -472,7 +472,7 @@ config INDYDOG
472 tristate "Indy/I2 Hardware Watchdog" 472 tristate "Indy/I2 Hardware Watchdog"
473 depends on WATCHDOG && SGI_IP22 473 depends on WATCHDOG && SGI_IP22
474 help 474 help
475 Hardwaredriver for the Indy's/I2's watchdog. This is a 475 Hardware driver for the Indy's/I2's watchdog. This is a
476 watchdog timer that will reboot the machine after a 60 second 476 watchdog timer that will reboot the machine after a 60 second
477 timer expired and no process has written to /dev/watchdog during 477 timer expired and no process has written to /dev/watchdog during
478 that time. 478 that time.
diff --git a/drivers/hwmon/abituguru.c b/drivers/hwmon/abituguru.c
index cc15c4f2e9e..35ad1b03272 100644
--- a/drivers/hwmon/abituguru.c
+++ b/drivers/hwmon/abituguru.c
@@ -26,6 +26,7 @@
26#include <linux/jiffies.h> 26#include <linux/jiffies.h>
27#include <linux/mutex.h> 27#include <linux/mutex.h>
28#include <linux/err.h> 28#include <linux/err.h>
29#include <linux/delay.h>
29#include <linux/platform_device.h> 30#include <linux/platform_device.h>
30#include <linux/hwmon.h> 31#include <linux/hwmon.h>
31#include <linux/hwmon-sysfs.h> 32#include <linux/hwmon-sysfs.h>
@@ -64,17 +65,17 @@
64#define ABIT_UGURU_IN_SENSOR 0 65#define ABIT_UGURU_IN_SENSOR 0
65#define ABIT_UGURU_TEMP_SENSOR 1 66#define ABIT_UGURU_TEMP_SENSOR 1
66#define ABIT_UGURU_NC 2 67#define ABIT_UGURU_NC 2
67/* Timeouts / Retries, if these turn out to need a lot of fiddling we could 68/* In many cases we need to wait for the uGuru to reach a certain status, most
68 convert them to params. */ 69 of the time it will reach this status within 30 - 90 ISA reads, and thus we
69/* 250 was determined by trial and error, 200 works most of the time, but not 70 can best busy wait. This define gives the total amount of reads to try. */
70 always. I assume this is cpu-speed independent, since the ISA-bus and not 71#define ABIT_UGURU_WAIT_TIMEOUT 125
71 the CPU should be the bottleneck. Note that 250 sometimes is still not 72/* However sometimes older versions of the uGuru seem to be distracted and they
72 enough (only reported on AN7 mb) this is handled by a higher layer. */ 73 do not respond for a long time. To handle this we sleep before each of the
73#define ABIT_UGURU_WAIT_TIMEOUT 250 74 last ABIT_UGURU_WAIT_TIMEOUT_SLEEP tries. */
75#define ABIT_UGURU_WAIT_TIMEOUT_SLEEP 5
74/* Normally all expected status in abituguru_ready, are reported after the 76/* Normally all expected status in abituguru_ready, are reported after the
75 first read, but sometimes not and we need to poll, 5 polls was not enough 77 first read, but sometimes not and we need to poll. */
76 50 sofar is. */ 78#define ABIT_UGURU_READY_TIMEOUT 5
77#define ABIT_UGURU_READY_TIMEOUT 50
78/* Maximum 3 retries on timedout reads/writes, delay 200 ms before retrying */ 79/* Maximum 3 retries on timedout reads/writes, delay 200 ms before retrying */
79#define ABIT_UGURU_MAX_RETRIES 3 80#define ABIT_UGURU_MAX_RETRIES 3
80#define ABIT_UGURU_RETRY_DELAY (HZ/5) 81#define ABIT_UGURU_RETRY_DELAY (HZ/5)
@@ -226,6 +227,10 @@ static int abituguru_wait(struct abituguru_data *data, u8 state)
226 timeout--; 227 timeout--;
227 if (timeout == 0) 228 if (timeout == 0)
228 return -EBUSY; 229 return -EBUSY;
230 /* sleep a bit before our last few tries, see the comment on
231 this where ABIT_UGURU_WAIT_TIMEOUT_SLEEP is defined. */
232 if (timeout <= ABIT_UGURU_WAIT_TIMEOUT_SLEEP)
233 msleep(0);
229 } 234 }
230 return 0; 235 return 0;
231} 236}
@@ -256,6 +261,7 @@ static int abituguru_ready(struct abituguru_data *data)
256 "CMD reg does not hold 0xAC after ready command\n"); 261 "CMD reg does not hold 0xAC after ready command\n");
257 return -EIO; 262 return -EIO;
258 } 263 }
264 msleep(0);
259 } 265 }
260 266
261 /* After this the ABIT_UGURU_DATA port should contain 267 /* After this the ABIT_UGURU_DATA port should contain
@@ -268,6 +274,7 @@ static int abituguru_ready(struct abituguru_data *data)
268 "state != more input after ready command\n"); 274 "state != more input after ready command\n");
269 return -EIO; 275 return -EIO;
270 } 276 }
277 msleep(0);
271 } 278 }
272 279
273 data->uguru_ready = 1; 280 data->uguru_ready = 1;
@@ -331,7 +338,8 @@ static int abituguru_read(struct abituguru_data *data,
331 /* And read the data */ 338 /* And read the data */
332 for (i = 0; i < count; i++) { 339 for (i = 0; i < count; i++) {
333 if (abituguru_wait(data, ABIT_UGURU_STATUS_READ)) { 340 if (abituguru_wait(data, ABIT_UGURU_STATUS_READ)) {
334 ABIT_UGURU_DEBUG(1, "timeout exceeded waiting for " 341 ABIT_UGURU_DEBUG(retries ? 1 : 3,
342 "timeout exceeded waiting for "
335 "read state (bank: %d, sensor: %d)\n", 343 "read state (bank: %d, sensor: %d)\n",
336 (int)bank_addr, (int)sensor_addr); 344 (int)bank_addr, (int)sensor_addr);
337 break; 345 break;
@@ -350,7 +358,9 @@ static int abituguru_read(struct abituguru_data *data,
350static int abituguru_write(struct abituguru_data *data, 358static int abituguru_write(struct abituguru_data *data,
351 u8 bank_addr, u8 sensor_addr, u8 *buf, int count) 359 u8 bank_addr, u8 sensor_addr, u8 *buf, int count)
352{ 360{
353 int i; 361 /* We use the ready timeout as we have to wait for 0xAC just like the
362 ready function */
363 int i, timeout = ABIT_UGURU_READY_TIMEOUT;
354 364
355 /* Send the address */ 365 /* Send the address */
356 i = abituguru_send_address(data, bank_addr, sensor_addr, 366 i = abituguru_send_address(data, bank_addr, sensor_addr,
@@ -370,7 +380,8 @@ static int abituguru_write(struct abituguru_data *data,
370 } 380 }
371 381
372 /* Now we need to wait till the chip is ready to be read again, 382 /* Now we need to wait till the chip is ready to be read again,
373 don't ask why */ 383 so that we can read 0xAC as confirmation that our write has
384 succeeded. */
374 if (abituguru_wait(data, ABIT_UGURU_STATUS_READ)) { 385 if (abituguru_wait(data, ABIT_UGURU_STATUS_READ)) {
375 ABIT_UGURU_DEBUG(1, "timeout exceeded waiting for read state " 386 ABIT_UGURU_DEBUG(1, "timeout exceeded waiting for read state "
376 "after write (bank: %d, sensor: %d)\n", (int)bank_addr, 387 "after write (bank: %d, sensor: %d)\n", (int)bank_addr,
@@ -379,11 +390,15 @@ static int abituguru_write(struct abituguru_data *data,
379 } 390 }
380 391
381 /* Cmd port MUST be read now and should contain 0xAC */ 392 /* Cmd port MUST be read now and should contain 0xAC */
382 if (inb_p(data->addr + ABIT_UGURU_CMD) != 0xAC) { 393 while (inb_p(data->addr + ABIT_UGURU_CMD) != 0xAC) {
383 ABIT_UGURU_DEBUG(1, "CMD reg does not hold 0xAC after write " 394 timeout--;
384 "(bank: %d, sensor: %d)\n", (int)bank_addr, 395 if (timeout == 0) {
385 (int)sensor_addr); 396 ABIT_UGURU_DEBUG(1, "CMD reg does not hold 0xAC after "
386 return -EIO; 397 "write (bank: %d, sensor: %d)\n",
398 (int)bank_addr, (int)sensor_addr);
399 return -EIO;
400 }
401 msleep(0);
387 } 402 }
388 403
389 /* Last put the chip back in ready state */ 404 /* Last put the chip back in ready state */
@@ -403,7 +418,7 @@ abituguru_detect_bank1_sensor_type(struct abituguru_data *data,
403 u8 sensor_addr) 418 u8 sensor_addr)
404{ 419{
405 u8 val, buf[3]; 420 u8 val, buf[3];
406 int ret = ABIT_UGURU_NC; 421 int i, ret = -ENODEV; /* error is the most common used retval :| */
407 422
408 /* If overriden by the user return the user selected type */ 423 /* If overriden by the user return the user selected type */
409 if (bank1_types[sensor_addr] >= ABIT_UGURU_IN_SENSOR && 424 if (bank1_types[sensor_addr] >= ABIT_UGURU_IN_SENSOR &&
@@ -439,7 +454,7 @@ abituguru_detect_bank1_sensor_type(struct abituguru_data *data,
439 buf[2] = 250; 454 buf[2] = 250;
440 if (abituguru_write(data, ABIT_UGURU_SENSOR_BANK1 + 2, sensor_addr, 455 if (abituguru_write(data, ABIT_UGURU_SENSOR_BANK1 + 2, sensor_addr,
441 buf, 3) != 3) 456 buf, 3) != 3)
442 return -ENODEV; 457 goto abituguru_detect_bank1_sensor_type_exit;
443 /* Now we need 20 ms to give the uguru time to read the sensors 458 /* Now we need 20 ms to give the uguru time to read the sensors
444 and raise a voltage alarm */ 459 and raise a voltage alarm */
445 set_current_state(TASK_UNINTERRUPTIBLE); 460 set_current_state(TASK_UNINTERRUPTIBLE);
@@ -447,21 +462,16 @@ abituguru_detect_bank1_sensor_type(struct abituguru_data *data,
447 /* Check for alarm and check the alarm is a volt low alarm. */ 462 /* Check for alarm and check the alarm is a volt low alarm. */
448 if (abituguru_read(data, ABIT_UGURU_ALARM_BANK, 0, buf, 3, 463 if (abituguru_read(data, ABIT_UGURU_ALARM_BANK, 0, buf, 3,
449 ABIT_UGURU_MAX_RETRIES) != 3) 464 ABIT_UGURU_MAX_RETRIES) != 3)
450 return -ENODEV; 465 goto abituguru_detect_bank1_sensor_type_exit;
451 if (buf[sensor_addr/8] & (0x01 << (sensor_addr % 8))) { 466 if (buf[sensor_addr/8] & (0x01 << (sensor_addr % 8))) {
452 if (abituguru_read(data, ABIT_UGURU_SENSOR_BANK1 + 1, 467 if (abituguru_read(data, ABIT_UGURU_SENSOR_BANK1 + 1,
453 sensor_addr, buf, 3, 468 sensor_addr, buf, 3,
454 ABIT_UGURU_MAX_RETRIES) != 3) 469 ABIT_UGURU_MAX_RETRIES) != 3)
455 return -ENODEV; 470 goto abituguru_detect_bank1_sensor_type_exit;
456 if (buf[0] & ABIT_UGURU_VOLT_LOW_ALARM_FLAG) { 471 if (buf[0] & ABIT_UGURU_VOLT_LOW_ALARM_FLAG) {
457 /* Restore original settings */
458 if (abituguru_write(data, ABIT_UGURU_SENSOR_BANK1 + 2,
459 sensor_addr,
460 data->bank1_settings[sensor_addr],
461 3) != 3)
462 return -ENODEV;
463 ABIT_UGURU_DEBUG(2, " found volt sensor\n"); 472 ABIT_UGURU_DEBUG(2, " found volt sensor\n");
464 return ABIT_UGURU_IN_SENSOR; 473 ret = ABIT_UGURU_IN_SENSOR;
474 goto abituguru_detect_bank1_sensor_type_exit;
465 } else 475 } else
466 ABIT_UGURU_DEBUG(2, " alarm raised during volt " 476 ABIT_UGURU_DEBUG(2, " alarm raised during volt "
467 "sensor test, but volt low flag not set\n"); 477 "sensor test, but volt low flag not set\n");
@@ -477,7 +487,7 @@ abituguru_detect_bank1_sensor_type(struct abituguru_data *data,
477 buf[2] = 10; 487 buf[2] = 10;
478 if (abituguru_write(data, ABIT_UGURU_SENSOR_BANK1 + 2, sensor_addr, 488 if (abituguru_write(data, ABIT_UGURU_SENSOR_BANK1 + 2, sensor_addr,
479 buf, 3) != 3) 489 buf, 3) != 3)
480 return -ENODEV; 490 goto abituguru_detect_bank1_sensor_type_exit;
481 /* Now we need 50 ms to give the uguru time to read the sensors 491 /* Now we need 50 ms to give the uguru time to read the sensors
482 and raise a temp alarm */ 492 and raise a temp alarm */
483 set_current_state(TASK_UNINTERRUPTIBLE); 493 set_current_state(TASK_UNINTERRUPTIBLE);
@@ -485,15 +495,16 @@ abituguru_detect_bank1_sensor_type(struct abituguru_data *data,
485 /* Check for alarm and check the alarm is a temp high alarm. */ 495 /* Check for alarm and check the alarm is a temp high alarm. */
486 if (abituguru_read(data, ABIT_UGURU_ALARM_BANK, 0, buf, 3, 496 if (abituguru_read(data, ABIT_UGURU_ALARM_BANK, 0, buf, 3,
487 ABIT_UGURU_MAX_RETRIES) != 3) 497 ABIT_UGURU_MAX_RETRIES) != 3)
488 return -ENODEV; 498 goto abituguru_detect_bank1_sensor_type_exit;
489 if (buf[sensor_addr/8] & (0x01 << (sensor_addr % 8))) { 499 if (buf[sensor_addr/8] & (0x01 << (sensor_addr % 8))) {
490 if (abituguru_read(data, ABIT_UGURU_SENSOR_BANK1 + 1, 500 if (abituguru_read(data, ABIT_UGURU_SENSOR_BANK1 + 1,
491 sensor_addr, buf, 3, 501 sensor_addr, buf, 3,
492 ABIT_UGURU_MAX_RETRIES) != 3) 502 ABIT_UGURU_MAX_RETRIES) != 3)
493 return -ENODEV; 503 goto abituguru_detect_bank1_sensor_type_exit;
494 if (buf[0] & ABIT_UGURU_TEMP_HIGH_ALARM_FLAG) { 504 if (buf[0] & ABIT_UGURU_TEMP_HIGH_ALARM_FLAG) {
495 ret = ABIT_UGURU_TEMP_SENSOR;
496 ABIT_UGURU_DEBUG(2, " found temp sensor\n"); 505 ABIT_UGURU_DEBUG(2, " found temp sensor\n");
506 ret = ABIT_UGURU_TEMP_SENSOR;
507 goto abituguru_detect_bank1_sensor_type_exit;
497 } else 508 } else
498 ABIT_UGURU_DEBUG(2, " alarm raised during temp " 509 ABIT_UGURU_DEBUG(2, " alarm raised during temp "
499 "sensor test, but temp high flag not set\n"); 510 "sensor test, but temp high flag not set\n");
@@ -501,11 +512,23 @@ abituguru_detect_bank1_sensor_type(struct abituguru_data *data,
501 ABIT_UGURU_DEBUG(2, " alarm not raised during temp sensor " 512 ABIT_UGURU_DEBUG(2, " alarm not raised during temp sensor "
502 "test\n"); 513 "test\n");
503 514
504 /* Restore original settings */ 515 ret = ABIT_UGURU_NC;
505 if (abituguru_write(data, ABIT_UGURU_SENSOR_BANK1 + 2, sensor_addr, 516abituguru_detect_bank1_sensor_type_exit:
506 data->bank1_settings[sensor_addr], 3) != 3) 517 /* Restore original settings, failing here is really BAD, it has been
518 reported that some BIOS-es hang when entering the uGuru menu with
519 invalid settings present in the uGuru, so we try this 3 times. */
520 for (i = 0; i < 3; i++)
521 if (abituguru_write(data, ABIT_UGURU_SENSOR_BANK1 + 2,
522 sensor_addr, data->bank1_settings[sensor_addr],
523 3) == 3)
524 break;
525 if (i == 3) {
526 printk(KERN_ERR ABIT_UGURU_NAME
527 ": Fatal error could not restore original settings. "
528 "This should never happen please report this to the "
529 "abituguru maintainer (see MAINTAINERS)\n");
507 return -ENODEV; 530 return -ENODEV;
508 531 }
509 return ret; 532 return ret;
510} 533}
511 534
@@ -1305,7 +1328,7 @@ static struct abituguru_data *abituguru_update_device(struct device *dev)
1305 data->update_timeouts = 0; 1328 data->update_timeouts = 0;
1306LEAVE_UPDATE: 1329LEAVE_UPDATE:
1307 /* handle timeout condition */ 1330 /* handle timeout condition */
1308 if (err == -EBUSY) { 1331 if (!success && (err == -EBUSY || err >= 0)) {
1309 /* No overflow please */ 1332 /* No overflow please */
1310 if (data->update_timeouts < 255u) 1333 if (data->update_timeouts < 255u)
1311 data->update_timeouts++; 1334 data->update_timeouts++;
diff --git a/drivers/i2c/chips/tps65010.c b/drivers/i2c/chips/tps65010.c
index e7e27049fbf..0be6fd6a267 100644
--- a/drivers/i2c/chips/tps65010.c
+++ b/drivers/i2c/chips/tps65010.c
@@ -43,13 +43,12 @@
43/*-------------------------------------------------------------------------*/ 43/*-------------------------------------------------------------------------*/
44 44
45#define DRIVER_VERSION "2 May 2005" 45#define DRIVER_VERSION "2 May 2005"
46#define DRIVER_NAME (tps65010_driver.name) 46#define DRIVER_NAME (tps65010_driver.driver.name)
47 47
48MODULE_DESCRIPTION("TPS6501x Power Management Driver"); 48MODULE_DESCRIPTION("TPS6501x Power Management Driver");
49MODULE_LICENSE("GPL"); 49MODULE_LICENSE("GPL");
50 50
51static unsigned short normal_i2c[] = { 0x48, /* 0x49, */ I2C_CLIENT_END }; 51static unsigned short normal_i2c[] = { 0x48, /* 0x49, */ I2C_CLIENT_END };
52static unsigned short normal_i2c_range[] = { I2C_CLIENT_END };
53 52
54I2C_CLIENT_INSMOD; 53I2C_CLIENT_INSMOD;
55 54
@@ -100,7 +99,7 @@ struct tps65010 {
100 /* not currently tracking GPIO state */ 99 /* not currently tracking GPIO state */
101}; 100};
102 101
103#define POWER_POLL_DELAY msecs_to_jiffies(800) 102#define POWER_POLL_DELAY msecs_to_jiffies(5000)
104 103
105/*-------------------------------------------------------------------------*/ 104/*-------------------------------------------------------------------------*/
106 105
@@ -520,8 +519,11 @@ tps65010_probe(struct i2c_adapter *bus, int address, int kind)
520 goto fail1; 519 goto fail1;
521 } 520 }
522 521
522 /* the IRQ is active low, but many gpio lines can't support that
523 * so this driver can use falling-edge triggers instead.
524 */
525 irqflags = IRQF_SAMPLE_RANDOM;
523#ifdef CONFIG_ARM 526#ifdef CONFIG_ARM
524 irqflags = IRQF_SAMPLE_RANDOM | IRQF_TRIGGER_LOW;
525 if (machine_is_omap_h2()) { 527 if (machine_is_omap_h2()) {
526 tps->model = TPS65010; 528 tps->model = TPS65010;
527 omap_cfg_reg(W4_GPIO58); 529 omap_cfg_reg(W4_GPIO58);
@@ -543,8 +545,6 @@ tps65010_probe(struct i2c_adapter *bus, int address, int kind)
543 545
544 // FIXME set up this board's IRQ ... 546 // FIXME set up this board's IRQ ...
545 } 547 }
546#else
547 irqflags = IRQF_SAMPLE_RANDOM;
548#endif 548#endif
549 549
550 if (tps->irq > 0) { 550 if (tps->irq > 0) {
diff --git a/drivers/ide/pci/generic.c b/drivers/ide/pci/generic.c
index 2f962cfa3f7..78810ba982e 100644
--- a/drivers/ide/pci/generic.c
+++ b/drivers/ide/pci/generic.c
@@ -180,6 +180,36 @@ static ide_pci_device_t generic_chipsets[] __devinitdata = {
180 .channels = 2, 180 .channels = 2,
181 .autodma = AUTODMA, 181 .autodma = AUTODMA,
182 .bootable = OFF_BOARD, 182 .bootable = OFF_BOARD,
183 },{ /* 15 */
184 .name = "JMB361",
185 .init_hwif = init_hwif_generic,
186 .channels = 2,
187 .autodma = AUTODMA,
188 .bootable = OFF_BOARD,
189 },{ /* 16 */
190 .name = "JMB363",
191 .init_hwif = init_hwif_generic,
192 .channels = 2,
193 .autodma = AUTODMA,
194 .bootable = OFF_BOARD,
195 },{ /* 17 */
196 .name = "JMB365",
197 .init_hwif = init_hwif_generic,
198 .channels = 2,
199 .autodma = AUTODMA,
200 .bootable = OFF_BOARD,
201 },{ /* 18 */
202 .name = "JMB366",
203 .init_hwif = init_hwif_generic,
204 .channels = 2,
205 .autodma = AUTODMA,
206 .bootable = OFF_BOARD,
207 },{ /* 19 */
208 .name = "JMB368",
209 .init_hwif = init_hwif_generic,
210 .channels = 2,
211 .autodma = AUTODMA,
212 .bootable = OFF_BOARD,
183 } 213 }
184}; 214};
185 215
diff --git a/drivers/ide/pci/via82cxxx.c b/drivers/ide/pci/via82cxxx.c
index afdaee3c15c..9b7589e8e93 100644
--- a/drivers/ide/pci/via82cxxx.c
+++ b/drivers/ide/pci/via82cxxx.c
@@ -6,7 +6,7 @@
6 * 6 *
7 * vt82c576, vt82c586, vt82c586a, vt82c586b, vt82c596a, vt82c596b, 7 * vt82c576, vt82c586, vt82c586a, vt82c586b, vt82c596a, vt82c596b,
8 * vt82c686, vt82c686a, vt82c686b, vt8231, vt8233, vt8233c, vt8233a, 8 * vt82c686, vt82c686a, vt82c686b, vt8231, vt8233, vt8233c, vt8233a,
9 * vt8235, vt8237 9 * vt8235, vt8237, vt8237a
10 * 10 *
11 * Copyright (c) 2000-2002 Vojtech Pavlik 11 * Copyright (c) 2000-2002 Vojtech Pavlik
12 * 12 *
@@ -81,6 +81,7 @@ static struct via_isa_bridge {
81 { "vt6410", PCI_DEVICE_ID_VIA_6410, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST }, 81 { "vt6410", PCI_DEVICE_ID_VIA_6410, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST },
82 { "vt8251", PCI_DEVICE_ID_VIA_8251, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST }, 82 { "vt8251", PCI_DEVICE_ID_VIA_8251, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST },
83 { "vt8237", PCI_DEVICE_ID_VIA_8237, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST }, 83 { "vt8237", PCI_DEVICE_ID_VIA_8237, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST },
84 { "vt8237a", PCI_DEVICE_ID_VIA_8237A, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST },
84 { "vt8235", PCI_DEVICE_ID_VIA_8235, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST }, 85 { "vt8235", PCI_DEVICE_ID_VIA_8235, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST },
85 { "vt8233a", PCI_DEVICE_ID_VIA_8233A, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST }, 86 { "vt8233a", PCI_DEVICE_ID_VIA_8233A, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST },
86 { "vt8233c", PCI_DEVICE_ID_VIA_8233C_0, 0x00, 0x2f, VIA_UDMA_100 }, 87 { "vt8233c", PCI_DEVICE_ID_VIA_8233C_0, 0x00, 0x2f, VIA_UDMA_100 },
diff --git a/drivers/ieee1394/ohci1394.c b/drivers/ieee1394/ohci1394.c
index d4bad6704bb..448df277337 100644
--- a/drivers/ieee1394/ohci1394.c
+++ b/drivers/ieee1394/ohci1394.c
@@ -3552,6 +3552,8 @@ static int ohci1394_pci_resume (struct pci_dev *pdev)
3552 3552
3553static int ohci1394_pci_suspend (struct pci_dev *pdev, pm_message_t state) 3553static int ohci1394_pci_suspend (struct pci_dev *pdev, pm_message_t state)
3554{ 3554{
3555 pci_save_state(pdev);
3556
3555#ifdef CONFIG_PPC_PMAC 3557#ifdef CONFIG_PPC_PMAC
3556 if (machine_is(powermac)) { 3558 if (machine_is(powermac)) {
3557 struct device_node *of_node; 3559 struct device_node *of_node;
@@ -3563,8 +3565,6 @@ static int ohci1394_pci_suspend (struct pci_dev *pdev, pm_message_t state)
3563 } 3565 }
3564#endif 3566#endif
3565 3567
3566 pci_save_state(pdev);
3567
3568 return 0; 3568 return 0;
3569} 3569}
3570 3570
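[Editorial note, not part of the patch] The ohci1394 hunk above moves pci_save_state() ahead of the PowerMac-specific power-down. A minimal sketch of the resulting suspend ordering, assuming a placeholder driver name; the point is that PCI config space must be snapshotted while the device is still fully powered.

	#include <linux/pci.h>

	static int foo_pci_suspend(struct pci_dev *pdev, pm_message_t state)
	{
		/* snapshot config space first, while the device is powered */
		pci_save_state(pdev);

		/* platform-specific power handling (e.g. the CONFIG_PPC_PMAC
		 * block in the hunk above) comes afterwards */

		return 0;
	}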
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
index e05ca2cdc73..75313ade2e0 100644
--- a/drivers/infiniband/core/cache.c
+++ b/drivers/infiniband/core/cache.c
@@ -301,7 +301,8 @@ static void ib_cache_event(struct ib_event_handler *handler,
301 event->event == IB_EVENT_PORT_ACTIVE || 301 event->event == IB_EVENT_PORT_ACTIVE ||
302 event->event == IB_EVENT_LID_CHANGE || 302 event->event == IB_EVENT_LID_CHANGE ||
303 event->event == IB_EVENT_PKEY_CHANGE || 303 event->event == IB_EVENT_PKEY_CHANGE ||
304 event->event == IB_EVENT_SM_CHANGE) { 304 event->event == IB_EVENT_SM_CHANGE ||
305 event->event == IB_EVENT_CLIENT_REREGISTER) {
305 work = kmalloc(sizeof *work, GFP_ATOMIC); 306 work = kmalloc(sizeof *work, GFP_ATOMIC);
306 if (work) { 307 if (work) {
307 INIT_WORK(&work->work, ib_cache_task, work); 308 INIT_WORK(&work->work, ib_cache_task, work);
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index aeda484ffd8..d6b84226bba 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -405,7 +405,8 @@ static void ib_sa_event(struct ib_event_handler *handler, struct ib_event *event
405 event->event == IB_EVENT_PORT_ACTIVE || 405 event->event == IB_EVENT_PORT_ACTIVE ||
406 event->event == IB_EVENT_LID_CHANGE || 406 event->event == IB_EVENT_LID_CHANGE ||
407 event->event == IB_EVENT_PKEY_CHANGE || 407 event->event == IB_EVENT_PKEY_CHANGE ||
408 event->event == IB_EVENT_SM_CHANGE) { 408 event->event == IB_EVENT_SM_CHANGE ||
409 event->event == IB_EVENT_CLIENT_REREGISTER) {
409 struct ib_sa_device *sa_dev; 410 struct ib_sa_device *sa_dev;
410 sa_dev = container_of(handler, typeof(*sa_dev), event_handler); 411 sa_dev = container_of(handler, typeof(*sa_dev), event_handler);
411 412
diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
index 557cde3a456..7b82c1907f0 100644
--- a/drivers/infiniband/hw/mthca/mthca_main.c
+++ b/drivers/infiniband/hw/mthca/mthca_main.c
@@ -967,12 +967,12 @@ static struct {
967} mthca_hca_table[] = { 967} mthca_hca_table[] = {
968 [TAVOR] = { .latest_fw = MTHCA_FW_VER(3, 4, 0), 968 [TAVOR] = { .latest_fw = MTHCA_FW_VER(3, 4, 0),
969 .flags = 0 }, 969 .flags = 0 },
970 [ARBEL_COMPAT] = { .latest_fw = MTHCA_FW_VER(4, 7, 400), 970 [ARBEL_COMPAT] = { .latest_fw = MTHCA_FW_VER(4, 7, 600),
971 .flags = MTHCA_FLAG_PCIE }, 971 .flags = MTHCA_FLAG_PCIE },
972 [ARBEL_NATIVE] = { .latest_fw = MTHCA_FW_VER(5, 1, 0), 972 [ARBEL_NATIVE] = { .latest_fw = MTHCA_FW_VER(5, 1, 400),
973 .flags = MTHCA_FLAG_MEMFREE | 973 .flags = MTHCA_FLAG_MEMFREE |
974 MTHCA_FLAG_PCIE }, 974 MTHCA_FLAG_PCIE },
975 [SINAI] = { .latest_fw = MTHCA_FW_VER(1, 0, 800), 975 [SINAI] = { .latest_fw = MTHCA_FW_VER(1, 1, 0),
976 .flags = MTHCA_FLAG_MEMFREE | 976 .flags = MTHCA_FLAG_MEMFREE |
977 MTHCA_FLAG_PCIE | 977 MTHCA_FLAG_PCIE |
978 MTHCA_FLAG_SINAI_OPT } 978 MTHCA_FLAG_SINAI_OPT }
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index 230ae21db8f..265b1d1c4a6 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -1287,11 +1287,7 @@ int mthca_register_device(struct mthca_dev *dev)
1287 (1ull << IB_USER_VERBS_CMD_MODIFY_QP) | 1287 (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
1288 (1ull << IB_USER_VERBS_CMD_DESTROY_QP) | 1288 (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
1289 (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) | 1289 (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) |
1290 (1ull << IB_USER_VERBS_CMD_DETACH_MCAST) | 1290 (1ull << IB_USER_VERBS_CMD_DETACH_MCAST);
1291 (1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
1292 (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
1293 (1ull << IB_USER_VERBS_CMD_QUERY_SRQ) |
1294 (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ);
1295 dev->ib_dev.node_type = IB_NODE_CA; 1291 dev->ib_dev.node_type = IB_NODE_CA;
1296 dev->ib_dev.phys_port_cnt = dev->limits.num_ports; 1292 dev->ib_dev.phys_port_cnt = dev->limits.num_ports;
1297 dev->ib_dev.dma_device = &dev->pdev->dev; 1293 dev->ib_dev.dma_device = &dev->pdev->dev;
@@ -1316,6 +1312,11 @@ int mthca_register_device(struct mthca_dev *dev)
1316 dev->ib_dev.modify_srq = mthca_modify_srq; 1312 dev->ib_dev.modify_srq = mthca_modify_srq;
1317 dev->ib_dev.query_srq = mthca_query_srq; 1313 dev->ib_dev.query_srq = mthca_query_srq;
1318 dev->ib_dev.destroy_srq = mthca_destroy_srq; 1314 dev->ib_dev.destroy_srq = mthca_destroy_srq;
1315 dev->ib_dev.uverbs_cmd_mask |=
1316 (1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
1317 (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
1318 (1ull << IB_USER_VERBS_CMD_QUERY_SRQ) |
1319 (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ);
1319 1320
1320 if (mthca_is_memfree(dev)) 1321 if (mthca_is_memfree(dev))
1321 dev->ib_dev.post_srq_recv = mthca_arbel_post_srq_recv; 1322 dev->ib_dev.post_srq_recv = mthca_arbel_post_srq_recv;
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.h b/drivers/infiniband/hw/mthca/mthca_provider.h
index 8de2887ba15..9a5bece3fa5 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.h
+++ b/drivers/infiniband/hw/mthca/mthca_provider.h
@@ -136,8 +136,8 @@ struct mthca_ah {
136 * We have one global lock that protects dev->cq/qp_table. Each 136 * We have one global lock that protects dev->cq/qp_table. Each
137 * struct mthca_cq/qp also has its own lock. An individual qp lock 137 * struct mthca_cq/qp also has its own lock. An individual qp lock
138 * may be taken inside of an individual cq lock. Both cqs attached to 138 * may be taken inside of an individual cq lock. Both cqs attached to
139 * a qp may be locked, with the send cq locked first. No other 139 * a qp may be locked, with the cq with the lower cqn locked first.
140 * nesting should be done. 140 * No other nesting should be done.
141 * 141 *
142 * Each struct mthca_cq/qp also has an ref count, protected by the 142 * Each struct mthca_cq/qp also has an ref count, protected by the
143 * corresponding table lock. The pointer from the cq/qp_table to the 143 * corresponding table lock. The pointer from the cq/qp_table to the
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index cd8b6721ac9..2e8f6f36e0a 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -99,6 +99,10 @@ enum {
99 MTHCA_QP_BIT_RSC = 1 << 3 99 MTHCA_QP_BIT_RSC = 1 << 3
100}; 100};
101 101
102enum {
103 MTHCA_SEND_DOORBELL_FENCE = 1 << 5
104};
105
102struct mthca_qp_path { 106struct mthca_qp_path {
103 __be32 port_pkey; 107 __be32 port_pkey;
104 u8 rnr_retry; 108 u8 rnr_retry;
@@ -1259,6 +1263,32 @@ int mthca_alloc_qp(struct mthca_dev *dev,
1259 return 0; 1263 return 0;
1260} 1264}
1261 1265
1266static void mthca_lock_cqs(struct mthca_cq *send_cq, struct mthca_cq *recv_cq)
1267{
1268 if (send_cq == recv_cq)
1269 spin_lock_irq(&send_cq->lock);
1270 else if (send_cq->cqn < recv_cq->cqn) {
1271 spin_lock_irq(&send_cq->lock);
1272 spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
1273 } else {
1274 spin_lock_irq(&recv_cq->lock);
1275 spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
1276 }
1277}
1278
1279static void mthca_unlock_cqs(struct mthca_cq *send_cq, struct mthca_cq *recv_cq)
1280{
1281 if (send_cq == recv_cq)
1282 spin_unlock_irq(&send_cq->lock);
1283 else if (send_cq->cqn < recv_cq->cqn) {
1284 spin_unlock(&recv_cq->lock);
1285 spin_unlock_irq(&send_cq->lock);
1286 } else {
1287 spin_unlock(&send_cq->lock);
1288 spin_unlock_irq(&recv_cq->lock);
1289 }
1290}
1291
1262int mthca_alloc_sqp(struct mthca_dev *dev, 1292int mthca_alloc_sqp(struct mthca_dev *dev,
1263 struct mthca_pd *pd, 1293 struct mthca_pd *pd,
1264 struct mthca_cq *send_cq, 1294 struct mthca_cq *send_cq,
@@ -1311,17 +1341,13 @@ int mthca_alloc_sqp(struct mthca_dev *dev,
1311 * Lock CQs here, so that CQ polling code can do QP lookup 1341 * Lock CQs here, so that CQ polling code can do QP lookup
1312 * without taking a lock. 1342 * without taking a lock.
1313 */ 1343 */
1314 spin_lock_irq(&send_cq->lock); 1344 mthca_lock_cqs(send_cq, recv_cq);
1315 if (send_cq != recv_cq)
1316 spin_lock(&recv_cq->lock);
1317 1345
1318 spin_lock(&dev->qp_table.lock); 1346 spin_lock(&dev->qp_table.lock);
1319 mthca_array_clear(&dev->qp_table.qp, mqpn); 1347 mthca_array_clear(&dev->qp_table.qp, mqpn);
1320 spin_unlock(&dev->qp_table.lock); 1348 spin_unlock(&dev->qp_table.lock);
1321 1349
1322 if (send_cq != recv_cq) 1350 mthca_unlock_cqs(send_cq, recv_cq);
1323 spin_unlock(&recv_cq->lock);
1324 spin_unlock_irq(&send_cq->lock);
1325 1351
1326 err_out: 1352 err_out:
1327 dma_free_coherent(&dev->pdev->dev, sqp->header_buf_size, 1353 dma_free_coherent(&dev->pdev->dev, sqp->header_buf_size,
@@ -1355,9 +1381,7 @@ void mthca_free_qp(struct mthca_dev *dev,
1355 * Lock CQs here, so that CQ polling code can do QP lookup 1381 * Lock CQs here, so that CQ polling code can do QP lookup
1356 * without taking a lock. 1382 * without taking a lock.
1357 */ 1383 */
1358 spin_lock_irq(&send_cq->lock); 1384 mthca_lock_cqs(send_cq, recv_cq);
1359 if (send_cq != recv_cq)
1360 spin_lock(&recv_cq->lock);
1361 1385
1362 spin_lock(&dev->qp_table.lock); 1386 spin_lock(&dev->qp_table.lock);
1363 mthca_array_clear(&dev->qp_table.qp, 1387 mthca_array_clear(&dev->qp_table.qp,
@@ -1365,9 +1389,7 @@ void mthca_free_qp(struct mthca_dev *dev,
1365 --qp->refcount; 1389 --qp->refcount;
1366 spin_unlock(&dev->qp_table.lock); 1390 spin_unlock(&dev->qp_table.lock);
1367 1391
1368 if (send_cq != recv_cq) 1392 mthca_unlock_cqs(send_cq, recv_cq);
1369 spin_unlock(&recv_cq->lock);
1370 spin_unlock_irq(&send_cq->lock);
1371 1393
1372 wait_event(qp->wait, !get_qp_refcount(dev, qp)); 1394 wait_event(qp->wait, !get_qp_refcount(dev, qp));
1373 1395
@@ -1502,7 +1524,7 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
1502 int i; 1524 int i;
1503 int size; 1525 int size;
1504 int size0 = 0; 1526 int size0 = 0;
1505 u32 f0 = 0; 1527 u32 f0;
1506 int ind; 1528 int ind;
1507 u8 op0 = 0; 1529 u8 op0 = 0;
1508 1530
@@ -1686,6 +1708,8 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
1686 if (!size0) { 1708 if (!size0) {
1687 size0 = size; 1709 size0 = size;
1688 op0 = mthca_opcode[wr->opcode]; 1710 op0 = mthca_opcode[wr->opcode];
1711 f0 = wr->send_flags & IB_SEND_FENCE ?
1712 MTHCA_SEND_DOORBELL_FENCE : 0;
1689 } 1713 }
1690 1714
1691 ++ind; 1715 ++ind;
@@ -1843,7 +1867,7 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
1843 int i; 1867 int i;
1844 int size; 1868 int size;
1845 int size0 = 0; 1869 int size0 = 0;
1846 u32 f0 = 0; 1870 u32 f0;
1847 int ind; 1871 int ind;
1848 u8 op0 = 0; 1872 u8 op0 = 0;
1849 1873
@@ -2051,6 +2075,8 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
2051 if (!size0) { 2075 if (!size0) {
2052 size0 = size; 2076 size0 = size;
2053 op0 = mthca_opcode[wr->opcode]; 2077 op0 = mthca_opcode[wr->opcode];
2078 f0 = wr->send_flags & IB_SEND_FENCE ?
2079 MTHCA_SEND_DOORBELL_FENCE : 0;
2054 } 2080 }
2055 2081
2056 ++ind; 2082 ++ind;
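[Editorial note, not part of the patch] The new mthca_lock_cqs()/mthca_unlock_cqs() helpers above always take the CQ with the lower cqn first, matching the updated comment in mthca_provider.h. An illustration of why that ordering matters, using the helpers from the hunk with assumed CQ pointers cq_a and cq_b:

	/* Illustration only.  Two QPs share the same pair of CQs with the
	 * send/recv roles swapped.  Without a global order, context A could
	 * hold cq_a->lock while waiting for cq_b->lock and context B the
	 * reverse (ABBA deadlock).  Ordering by cqn means both contexts
	 * take the lower-numbered CQ first, so one always wins cleanly. */

	/* context A: QP with send_cq = cq_a, recv_cq = cq_b */
	mthca_lock_cqs(cq_a, cq_b);
	/* ... update qp_table / CQ state ... */
	mthca_unlock_cqs(cq_a, cq_b);

	/* context B: QP with send_cq = cq_b, recv_cq = cq_a */
	mthca_lock_cqs(cq_b, cq_a);
	/* ... */
	mthca_unlock_cqs(cq_b, cq_a);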
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 34b0da5cfa0..1437d7ee3b1 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -378,21 +378,6 @@ iscsi_iser_conn_start(struct iscsi_cls_conn *cls_conn)
378 return iser_conn_set_full_featured_mode(conn); 378 return iser_conn_set_full_featured_mode(conn);
379} 379}
380 380
381static void
382iscsi_iser_conn_terminate(struct iscsi_conn *conn)
383{
384 struct iscsi_iser_conn *iser_conn = conn->dd_data;
385 struct iser_conn *ib_conn = iser_conn->ib_conn;
386
387 BUG_ON(!ib_conn);
388 /* starts conn teardown process, waits until all previously *
389 * posted buffers get flushed, deallocates all conn resources */
390 iser_conn_terminate(ib_conn);
391 iser_conn->ib_conn = NULL;
392 conn->recv_lock = NULL;
393}
394
395
396static struct iscsi_transport iscsi_iser_transport; 381static struct iscsi_transport iscsi_iser_transport;
397 382
398static struct iscsi_cls_session * 383static struct iscsi_cls_session *
@@ -555,13 +540,13 @@ iscsi_iser_ep_poll(__u64 ep_handle, int timeout_ms)
555static void 540static void
556iscsi_iser_ep_disconnect(__u64 ep_handle) 541iscsi_iser_ep_disconnect(__u64 ep_handle)
557{ 542{
558 struct iser_conn *ib_conn = iscsi_iser_ib_conn_lookup(ep_handle); 543 struct iser_conn *ib_conn;
559 544
545 ib_conn = iscsi_iser_ib_conn_lookup(ep_handle);
560 if (!ib_conn) 546 if (!ib_conn)
561 return; 547 return;
562 548
563 iser_err("ib conn %p state %d\n",ib_conn, ib_conn->state); 549 iser_err("ib conn %p state %d\n",ib_conn, ib_conn->state);
564
565 iser_conn_terminate(ib_conn); 550 iser_conn_terminate(ib_conn);
566} 551}
567 552
@@ -614,9 +599,6 @@ static struct iscsi_transport iscsi_iser_transport = {
614 .get_session_param = iscsi_session_get_param, 599 .get_session_param = iscsi_session_get_param,
615 .start_conn = iscsi_iser_conn_start, 600 .start_conn = iscsi_iser_conn_start,
616 .stop_conn = iscsi_conn_stop, 601 .stop_conn = iscsi_conn_stop,
617 /* these are called as part of conn recovery */
618 .suspend_conn_recv = NULL, /* FIXME is/how this relvant to iser? */
619 .terminate_conn = iscsi_iser_conn_terminate,
620 /* IO */ 602 /* IO */
621 .send_pdu = iscsi_conn_send_pdu, 603 .send_pdu = iscsi_conn_send_pdu,
622 .get_stats = iscsi_iser_conn_get_stats, 604 .get_stats = iscsi_iser_conn_get_stats,
diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c
index 6bfa0cf4b1d..a86afd0a5ef 100644
--- a/drivers/input/keyboard/atkbd.c
+++ b/drivers/input/keyboard/atkbd.c
@@ -498,7 +498,7 @@ static int atkbd_set_repeat_rate(struct atkbd *atkbd)
498 i++; 498 i++;
499 dev->rep[REP_PERIOD] = period[i]; 499 dev->rep[REP_PERIOD] = period[i];
500 500
501 while (j < ARRAY_SIZE(period) - 1 && delay[j] < dev->rep[REP_DELAY]) 501 while (j < ARRAY_SIZE(delay) - 1 && delay[j] < dev->rep[REP_DELAY])
502 j++; 502 j++;
503 dev->rep[REP_DELAY] = delay[j]; 503 dev->rep[REP_DELAY] = delay[j];
504 504
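[Editorial note, not part of the patch] The one-line atkbd fix above bounds the loop by ARRAY_SIZE(delay) rather than ARRAY_SIZE(period), because the loop indexes delay[]. A tiny sketch, with hypothetical table contents, of the out-of-bounds read the old bound risked whenever the two tables differ in length:

	/* hypothetical tables -- values are illustrative, not the driver's */
	static const unsigned int period[] = { 33, 42, 50, 67, 83, 100, 200, 250 };	/* 8 entries */
	static const unsigned int delay[]  = { 250, 500, 750, 1000 };			/* 4 entries */

	unsigned int j = 0, wanted_delay = 600;		/* assumed requested delay */

	/* bounding j by ARRAY_SIZE(period) - 1 would let delay[j] read past
	 * the end of delay[]; the bound must match the array being indexed */
	while (j < ARRAY_SIZE(delay) - 1 && delay[j] < wanted_delay)
		j++;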
diff --git a/drivers/input/misc/wistron_btns.c b/drivers/input/misc/wistron_btns.c
index a8efc1af36c..de0f46dd969 100644
--- a/drivers/input/misc/wistron_btns.c
+++ b/drivers/input/misc/wistron_btns.c
@@ -259,11 +259,11 @@ static int __init dmi_matched(struct dmi_system_id *dmi)
259 return 1; 259 return 1;
260} 260}
261 261
262static struct key_entry keymap_empty[] __initdata = { 262static struct key_entry keymap_empty[] = {
263 { KE_END, 0 } 263 { KE_END, 0 }
264}; 264};
265 265
266static struct key_entry keymap_fs_amilo_pro_v2000[] __initdata = { 266static struct key_entry keymap_fs_amilo_pro_v2000[] = {
267 { KE_KEY, 0x01, KEY_HELP }, 267 { KE_KEY, 0x01, KEY_HELP },
268 { KE_KEY, 0x11, KEY_PROG1 }, 268 { KE_KEY, 0x11, KEY_PROG1 },
269 { KE_KEY, 0x12, KEY_PROG2 }, 269 { KE_KEY, 0x12, KEY_PROG2 },
@@ -273,7 +273,7 @@ static struct key_entry keymap_fs_amilo_pro_v2000[] __initdata = {
273 { KE_END, 0 } 273 { KE_END, 0 }
274}; 274};
275 275
276static struct key_entry keymap_fujitsu_n3510[] __initdata = { 276static struct key_entry keymap_fujitsu_n3510[] = {
277 { KE_KEY, 0x11, KEY_PROG1 }, 277 { KE_KEY, 0x11, KEY_PROG1 },
278 { KE_KEY, 0x12, KEY_PROG2 }, 278 { KE_KEY, 0x12, KEY_PROG2 },
279 { KE_KEY, 0x36, KEY_WWW }, 279 { KE_KEY, 0x36, KEY_WWW },
@@ -285,7 +285,7 @@ static struct key_entry keymap_fujitsu_n3510[] __initdata = {
285 { KE_END, 0 } 285 { KE_END, 0 }
286}; 286};
287 287
288static struct key_entry keymap_wistron_ms2111[] __initdata = { 288static struct key_entry keymap_wistron_ms2111[] = {
289 { KE_KEY, 0x11, KEY_PROG1 }, 289 { KE_KEY, 0x11, KEY_PROG1 },
290 { KE_KEY, 0x12, KEY_PROG2 }, 290 { KE_KEY, 0x12, KEY_PROG2 },
291 { KE_KEY, 0x13, KEY_PROG3 }, 291 { KE_KEY, 0x13, KEY_PROG3 },
@@ -294,7 +294,7 @@ static struct key_entry keymap_wistron_ms2111[] __initdata = {
294 { KE_END, 0 } 294 { KE_END, 0 }
295}; 295};
296 296
297static struct key_entry keymap_wistron_ms2141[] __initdata = { 297static struct key_entry keymap_wistron_ms2141[] = {
298 { KE_KEY, 0x11, KEY_PROG1 }, 298 { KE_KEY, 0x11, KEY_PROG1 },
299 { KE_KEY, 0x12, KEY_PROG2 }, 299 { KE_KEY, 0x12, KEY_PROG2 },
300 { KE_WIFI, 0x30, 0 }, 300 { KE_WIFI, 0x30, 0 },
@@ -307,7 +307,7 @@ static struct key_entry keymap_wistron_ms2141[] __initdata = {
307 { KE_END, 0 } 307 { KE_END, 0 }
308}; 308};
309 309
310static struct key_entry keymap_acer_aspire_1500[] __initdata = { 310static struct key_entry keymap_acer_aspire_1500[] = {
311 { KE_KEY, 0x11, KEY_PROG1 }, 311 { KE_KEY, 0x11, KEY_PROG1 },
312 { KE_KEY, 0x12, KEY_PROG2 }, 312 { KE_KEY, 0x12, KEY_PROG2 },
313 { KE_WIFI, 0x30, 0 }, 313 { KE_WIFI, 0x30, 0 },
@@ -317,7 +317,7 @@ static struct key_entry keymap_acer_aspire_1500[] __initdata = {
317 { KE_END, 0 } 317 { KE_END, 0 }
318}; 318};
319 319
320static struct key_entry keymap_acer_travelmate_240[] __initdata = { 320static struct key_entry keymap_acer_travelmate_240[] = {
321 { KE_KEY, 0x31, KEY_MAIL }, 321 { KE_KEY, 0x31, KEY_MAIL },
322 { KE_KEY, 0x36, KEY_WWW }, 322 { KE_KEY, 0x36, KEY_WWW },
323 { KE_KEY, 0x11, KEY_PROG1 }, 323 { KE_KEY, 0x11, KEY_PROG1 },
@@ -327,7 +327,7 @@ static struct key_entry keymap_acer_travelmate_240[] __initdata = {
327 { KE_END, 0 } 327 { KE_END, 0 }
328}; 328};
329 329
330static struct key_entry keymap_aopen_1559as[] __initdata = { 330static struct key_entry keymap_aopen_1559as[] = {
331 { KE_KEY, 0x01, KEY_HELP }, 331 { KE_KEY, 0x01, KEY_HELP },
332 { KE_KEY, 0x06, KEY_PROG3 }, 332 { KE_KEY, 0x06, KEY_PROG3 },
333 { KE_KEY, 0x11, KEY_PROG1 }, 333 { KE_KEY, 0x11, KEY_PROG1 },
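[Editorial note, not part of the patch] The wistron_btns hunk above drops __initdata from the keymap tables. A short illustration (assumed names, not the driver's code) of why: memory in the init sections is freed once boot completes, so any table still reachable from runtime code must not carry the annotation.

	/* Illustration only.  The keymap pointer is dereferenced long after
	 * boot, so the table it points at must survive the freeing of init
	 * memory -- i.e. it must NOT be marked __initdata. */
	static struct key_entry keymap_example[] /* __initdata would be a bug */ = {
		{ KE_KEY, 0x11, KEY_PROG1 },
		{ KE_END, 0 }
	};

	static struct key_entry *keymap;	/* used in the runtime poll path */

	static int __init example_init(void)
	{
		keymap = keymap_example;	/* pointer outlives init */
		return 0;
	}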
diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c
index 8bc9f51ae6c..343afa38f4c 100644
--- a/drivers/input/mouse/psmouse-base.c
+++ b/drivers/input/mouse/psmouse-base.c
@@ -485,13 +485,6 @@ static int im_explorer_detect(struct psmouse *psmouse, int set_properties)
485 param[0] = 40; 485 param[0] = 40;
486 ps2_command(ps2dev, param, PSMOUSE_CMD_SETRATE); 486 ps2_command(ps2dev, param, PSMOUSE_CMD_SETRATE);
487 487
488 param[0] = 200;
489 ps2_command(ps2dev, param, PSMOUSE_CMD_SETRATE);
490 param[0] = 200;
491 ps2_command(ps2dev, param, PSMOUSE_CMD_SETRATE);
492 param[0] = 60;
493 ps2_command(ps2dev, param, PSMOUSE_CMD_SETRATE);
494
495 if (set_properties) { 488 if (set_properties) {
496 set_bit(BTN_MIDDLE, psmouse->dev->keybit); 489 set_bit(BTN_MIDDLE, psmouse->dev->keybit);
497 set_bit(REL_WHEEL, psmouse->dev->relbit); 490 set_bit(REL_WHEEL, psmouse->dev->relbit);
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 217615b3322..93f701ea87b 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -710,6 +710,8 @@ static int multipath_ctr(struct dm_target *ti, unsigned int argc,
710 return -EINVAL; 710 return -EINVAL;
711 } 711 }
712 712
713 m->ti = ti;
714
713 r = parse_features(&as, m, ti); 715 r = parse_features(&as, m, ti);
714 if (r) 716 if (r)
715 goto bad; 717 goto bad;
@@ -751,7 +753,6 @@ static int multipath_ctr(struct dm_target *ti, unsigned int argc,
751 } 753 }
752 754
753 ti->private = m; 755 ti->private = m;
754 m->ti = ti;
755 756
756 return 0; 757 return 0;
757 758
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index be48cedf986..c54de989eb0 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -255,7 +255,9 @@ static struct region *__rh_alloc(struct region_hash *rh, region_t region)
255 struct region *reg, *nreg; 255 struct region *reg, *nreg;
256 256
257 read_unlock(&rh->hash_lock); 257 read_unlock(&rh->hash_lock);
258 nreg = mempool_alloc(rh->region_pool, GFP_NOIO); 258 nreg = mempool_alloc(rh->region_pool, GFP_ATOMIC);
259 if (unlikely(!nreg))
260 nreg = kmalloc(sizeof(struct region), GFP_NOIO);
259 nreg->state = rh->log->type->in_sync(rh->log, region, 1) ? 261 nreg->state = rh->log->type->in_sync(rh->log, region, 1) ?
260 RH_CLEAN : RH_NOSYNC; 262 RH_CLEAN : RH_NOSYNC;
261 nreg->rh = rh; 263 nreg->rh = rh;
diff --git a/drivers/md/md.c b/drivers/md/md.c
index b6d16022a53..8dbab2ef388 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -1597,6 +1597,19 @@ void md_update_sb(mddev_t * mddev)
1597 1597
1598repeat: 1598repeat:
1599 spin_lock_irq(&mddev->write_lock); 1599 spin_lock_irq(&mddev->write_lock);
1600
1601 if (mddev->degraded && mddev->sb_dirty == 3)
1602 /* If the array is degraded, then skipping spares is both
1603 * dangerous and fairly pointless.
1604 * Dangerous because a device that was removed from the array
1605 * might have a event_count that still looks up-to-date,
1606 * so it can be re-added without a resync.
1607 * Pointless because if there are any spares to skip,
1608 * then a recovery will happen and soon that array won't
1609 * be degraded any more and the spare can go back to sleep then.
1610 */
1611 mddev->sb_dirty = 1;
1612
1600 sync_req = mddev->in_sync; 1613 sync_req = mddev->in_sync;
1601 mddev->utime = get_seconds(); 1614 mddev->utime = get_seconds();
1602 if (mddev->sb_dirty == 3) 1615 if (mddev->sb_dirty == 3)
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 1efe22a2d04..87bfe9e7d8c 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1625,15 +1625,16 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
1625 return 0; 1625 return 0;
1626 } 1626 }
1627 1627
1628 /* before building a request, check if we can skip these blocks..
1629 * This call the bitmap_start_sync doesn't actually record anything
1630 */
1631 if (mddev->bitmap == NULL && 1628 if (mddev->bitmap == NULL &&
1632 mddev->recovery_cp == MaxSector && 1629 mddev->recovery_cp == MaxSector &&
1630 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
1633 conf->fullsync == 0) { 1631 conf->fullsync == 0) {
1634 *skipped = 1; 1632 *skipped = 1;
1635 return max_sector - sector_nr; 1633 return max_sector - sector_nr;
1636 } 1634 }
1635 /* before building a request, check if we can skip these blocks..
1636 * This call the bitmap_start_sync doesn't actually record anything
1637 */
1637 if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) && 1638 if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
1638 !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) { 1639 !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
1639 /* We can skip this block, and probably several more */ 1640 /* We can skip this block, and probably several more */
diff --git a/drivers/media/dvb/bt8xx/dst.c b/drivers/media/dvb/bt8xx/dst.c
index d687a14ec0a..06ac899a9a2 100644
--- a/drivers/media/dvb/bt8xx/dst.c
+++ b/drivers/media/dvb/bt8xx/dst.c
@@ -393,7 +393,7 @@ static int dst_set_bandwidth(struct dst_state *state, fe_bandwidth_t bandwidth)
393 state->bandwidth = bandwidth; 393 state->bandwidth = bandwidth;
394 394
395 if (state->dst_type != DST_TYPE_IS_TERR) 395 if (state->dst_type != DST_TYPE_IS_TERR)
396 return 0; 396 return -EOPNOTSUPP;
397 397
398 switch (bandwidth) { 398 switch (bandwidth) {
399 case BANDWIDTH_6_MHZ: 399 case BANDWIDTH_6_MHZ:
@@ -462,7 +462,7 @@ static int dst_set_symbolrate(struct dst_state *state, u32 srate)
462 462
463 state->symbol_rate = srate; 463 state->symbol_rate = srate;
464 if (state->dst_type == DST_TYPE_IS_TERR) { 464 if (state->dst_type == DST_TYPE_IS_TERR) {
465 return 0; 465 return -EOPNOTSUPP;
466 } 466 }
467 dprintk(verbose, DST_INFO, 1, "set symrate %u", srate); 467 dprintk(verbose, DST_INFO, 1, "set symrate %u", srate);
468 srate /= 1000; 468 srate /= 1000;
@@ -504,7 +504,7 @@ static int dst_set_symbolrate(struct dst_state *state, u32 srate)
504static int dst_set_modulation(struct dst_state *state, fe_modulation_t modulation) 504static int dst_set_modulation(struct dst_state *state, fe_modulation_t modulation)
505{ 505{
506 if (state->dst_type != DST_TYPE_IS_CABLE) 506 if (state->dst_type != DST_TYPE_IS_CABLE)
507 return 0; 507 return -EOPNOTSUPP;
508 508
509 state->modulation = modulation; 509 state->modulation = modulation;
510 switch (modulation) { 510 switch (modulation) {
@@ -1234,7 +1234,7 @@ int dst_command(struct dst_state *state, u8 *data, u8 len)
1234 goto error; 1234 goto error;
1235 } 1235 }
1236 if (write_dst(state, data, len)) { 1236 if (write_dst(state, data, len)) {
1237 dprintk(verbose, DST_INFO, 1, "Tring to recover.. "); 1237 dprintk(verbose, DST_INFO, 1, "Trying to recover.. ");
1238 if ((dst_error_recovery(state)) < 0) { 1238 if ((dst_error_recovery(state)) < 0) {
1239 dprintk(verbose, DST_ERROR, 1, "Recovery Failed."); 1239 dprintk(verbose, DST_ERROR, 1, "Recovery Failed.");
1240 goto error; 1240 goto error;
@@ -1328,15 +1328,13 @@ static int dst_tone_power_cmd(struct dst_state *state)
1328{ 1328{
1329 u8 paket[8] = { 0x00, 0x09, 0xff, 0xff, 0x01, 0x00, 0x00, 0x00 }; 1329 u8 paket[8] = { 0x00, 0x09, 0xff, 0xff, 0x01, 0x00, 0x00, 0x00 };
1330 1330
1331 if (state->dst_type == DST_TYPE_IS_TERR) 1331 if (state->dst_type != DST_TYPE_IS_SAT)
1332 return 0; 1332 return -EOPNOTSUPP;
1333 paket[4] = state->tx_tuna[4]; 1333 paket[4] = state->tx_tuna[4];
1334 paket[2] = state->tx_tuna[2]; 1334 paket[2] = state->tx_tuna[2];
1335 paket[3] = state->tx_tuna[3]; 1335 paket[3] = state->tx_tuna[3];
1336 paket[7] = dst_check_sum (paket, 7); 1336 paket[7] = dst_check_sum (paket, 7);
1337 dst_command(state, paket, 8); 1337 return dst_command(state, paket, 8);
1338
1339 return 0;
1340} 1338}
1341 1339
1342static int dst_get_tuna(struct dst_state *state) 1340static int dst_get_tuna(struct dst_state *state)
@@ -1465,7 +1463,7 @@ static int dst_set_diseqc(struct dvb_frontend *fe, struct dvb_diseqc_master_cmd
1465 u8 paket[8] = { 0x00, 0x08, 0x04, 0xe0, 0x10, 0x38, 0xf0, 0xec }; 1463 u8 paket[8] = { 0x00, 0x08, 0x04, 0xe0, 0x10, 0x38, 0xf0, 0xec };
1466 1464
1467 if (state->dst_type != DST_TYPE_IS_SAT) 1465 if (state->dst_type != DST_TYPE_IS_SAT)
1468 return 0; 1466 return -EOPNOTSUPP;
1469 if (cmd->msg_len > 0 && cmd->msg_len < 5) 1467 if (cmd->msg_len > 0 && cmd->msg_len < 5)
1470 memcpy(&paket[3], cmd->msg, cmd->msg_len); 1468 memcpy(&paket[3], cmd->msg, cmd->msg_len);
1471 else if (cmd->msg_len == 5 && state->dst_hw_cap & DST_TYPE_HAS_DISEQC5) 1469 else if (cmd->msg_len == 5 && state->dst_hw_cap & DST_TYPE_HAS_DISEQC5)
@@ -1473,18 +1471,17 @@ static int dst_set_diseqc(struct dvb_frontend *fe, struct dvb_diseqc_master_cmd
1473 else 1471 else
1474 return -EINVAL; 1472 return -EINVAL;
1475 paket[7] = dst_check_sum(&paket[0], 7); 1473 paket[7] = dst_check_sum(&paket[0], 7);
1476 dst_command(state, paket, 8); 1474 return dst_command(state, paket, 8);
1477 return 0;
1478} 1475}
1479 1476
1480static int dst_set_voltage(struct dvb_frontend *fe, fe_sec_voltage_t voltage) 1477static int dst_set_voltage(struct dvb_frontend *fe, fe_sec_voltage_t voltage)
1481{ 1478{
1482 int need_cmd; 1479 int need_cmd, retval = 0;
1483 struct dst_state *state = fe->demodulator_priv; 1480 struct dst_state *state = fe->demodulator_priv;
1484 1481
1485 state->voltage = voltage; 1482 state->voltage = voltage;
1486 if (state->dst_type != DST_TYPE_IS_SAT) 1483 if (state->dst_type != DST_TYPE_IS_SAT)
1487 return 0; 1484 return -EOPNOTSUPP;
1488 1485
1489 need_cmd = 0; 1486 need_cmd = 0;
1490 1487
@@ -1506,9 +1503,9 @@ static int dst_set_voltage(struct dvb_frontend *fe, fe_sec_voltage_t voltage)
1506 } 1503 }
1507 1504
1508 if (need_cmd) 1505 if (need_cmd)
1509 dst_tone_power_cmd(state); 1506 retval = dst_tone_power_cmd(state);
1510 1507
1511 return 0; 1508 return retval;
1512} 1509}
1513 1510
1514static int dst_set_tone(struct dvb_frontend *fe, fe_sec_tone_mode_t tone) 1511static int dst_set_tone(struct dvb_frontend *fe, fe_sec_tone_mode_t tone)
@@ -1517,7 +1514,7 @@ static int dst_set_tone(struct dvb_frontend *fe, fe_sec_tone_mode_t tone)
1517 1514
1518 state->tone = tone; 1515 state->tone = tone;
1519 if (state->dst_type != DST_TYPE_IS_SAT) 1516 if (state->dst_type != DST_TYPE_IS_SAT)
1520 return 0; 1517 return -EOPNOTSUPP;
1521 1518
1522 switch (tone) { 1519 switch (tone) {
1523 case SEC_TONE_OFF: 1520 case SEC_TONE_OFF:
@@ -1533,9 +1530,7 @@ static int dst_set_tone(struct dvb_frontend *fe, fe_sec_tone_mode_t tone)
1533 default: 1530 default:
1534 return -EINVAL; 1531 return -EINVAL;
1535 } 1532 }
1536 dst_tone_power_cmd(state); 1533 return dst_tone_power_cmd(state);
1537
1538 return 0;
1539} 1534}
1540 1535
1541static int dst_send_burst(struct dvb_frontend *fe, fe_sec_mini_cmd_t minicmd) 1536static int dst_send_burst(struct dvb_frontend *fe, fe_sec_mini_cmd_t minicmd)
@@ -1543,7 +1538,7 @@ static int dst_send_burst(struct dvb_frontend *fe, fe_sec_mini_cmd_t minicmd)
1543 struct dst_state *state = fe->demodulator_priv; 1538 struct dst_state *state = fe->demodulator_priv;
1544 1539
1545 if (state->dst_type != DST_TYPE_IS_SAT) 1540 if (state->dst_type != DST_TYPE_IS_SAT)
1546 return 0; 1541 return -EOPNOTSUPP;
1547 state->minicmd = minicmd; 1542 state->minicmd = minicmd;
1548 switch (minicmd) { 1543 switch (minicmd) {
1549 case SEC_MINI_A: 1544 case SEC_MINI_A:
@@ -1553,9 +1548,7 @@ static int dst_send_burst(struct dvb_frontend *fe, fe_sec_mini_cmd_t minicmd)
1553 state->tx_tuna[3] = 0xff; 1548 state->tx_tuna[3] = 0xff;
1554 break; 1549 break;
1555 } 1550 }
1556 dst_tone_power_cmd(state); 1551 return dst_tone_power_cmd(state);
1557
1558 return 0;
1559} 1552}
1560 1553
1561 1554
@@ -1608,28 +1601,31 @@ static int dst_read_signal_strength(struct dvb_frontend *fe, u16 *strength)
1608{ 1601{
1609 struct dst_state *state = fe->demodulator_priv; 1602 struct dst_state *state = fe->demodulator_priv;
1610 1603
1611 dst_get_signal(state); 1604 int retval = dst_get_signal(state);
1612 *strength = state->decode_strength; 1605 *strength = state->decode_strength;
1613 1606
1614 return 0; 1607 return retval;
1615} 1608}
1616 1609
1617static int dst_read_snr(struct dvb_frontend *fe, u16 *snr) 1610static int dst_read_snr(struct dvb_frontend *fe, u16 *snr)
1618{ 1611{
1619 struct dst_state *state = fe->demodulator_priv; 1612 struct dst_state *state = fe->demodulator_priv;
1620 1613
1621 dst_get_signal(state); 1614 int retval = dst_get_signal(state);
1622 *snr = state->decode_snr; 1615 *snr = state->decode_snr;
1623 1616
1624 return 0; 1617 return retval;
1625} 1618}
1626 1619
1627static int dst_set_frontend(struct dvb_frontend *fe, struct dvb_frontend_parameters *p) 1620static int dst_set_frontend(struct dvb_frontend *fe, struct dvb_frontend_parameters *p)
1628{ 1621{
1622 int retval = -EINVAL;
1629 struct dst_state *state = fe->demodulator_priv; 1623 struct dst_state *state = fe->demodulator_priv;
1630 1624
1631 if (p != NULL) { 1625 if (p != NULL) {
1632 dst_set_freq(state, p->frequency); 1626 retval = dst_set_freq(state, p->frequency);
1627 if(retval != 0)
1628 return retval;
1633 dprintk(verbose, DST_DEBUG, 1, "Set Frequency=[%d]", p->frequency); 1629 dprintk(verbose, DST_DEBUG, 1, "Set Frequency=[%d]", p->frequency);
1634 1630
1635 if (state->dst_type == DST_TYPE_IS_SAT) { 1631 if (state->dst_type == DST_TYPE_IS_SAT) {
@@ -1647,10 +1643,10 @@ static int dst_set_frontend(struct dvb_frontend *fe, struct dvb_frontend_paramet
1647 dst_set_symbolrate(state, p->u.qam.symbol_rate); 1643 dst_set_symbolrate(state, p->u.qam.symbol_rate);
1648 dst_set_modulation(state, p->u.qam.modulation); 1644 dst_set_modulation(state, p->u.qam.modulation);
1649 } 1645 }
1650 dst_write_tuna(fe); 1646 retval = dst_write_tuna(fe);
1651 } 1647 }
1652 1648
1653 return 0; 1649 return retval;
1654} 1650}
1655 1651
1656static int dst_tune_frontend(struct dvb_frontend* fe, 1652static int dst_tune_frontend(struct dvb_frontend* fe,
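[Editorial note, not part of the patch] The dst.c changes above replace silent "return 0" with -EOPNOTSUPP for operations the attached card type does not support, and propagate the result of dst_command()/dst_write_tuna() instead of discarding it. A hypothetical caller sketch of what higher layers can now distinguish (in practice these functions are reached through the dvb_frontend ops table; fe is assumed):

	int ret = dst_set_voltage(fe, SEC_VOLTAGE_13);

	if (ret == -EOPNOTSUPP) {
		/* not a satellite card: voltage control simply does not apply */
	} else if (ret < 0) {
		/* the card is a SAT type but the command itself failed */
	}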
diff --git a/drivers/media/dvb/dvb-core/Makefile b/drivers/media/dvb/dvb-core/Makefile
index 11054657fdb..0b5182835cc 100644
--- a/drivers/media/dvb/dvb-core/Makefile
+++ b/drivers/media/dvb/dvb-core/Makefile
@@ -2,8 +2,8 @@
2# Makefile for the kernel DVB device drivers. 2# Makefile for the kernel DVB device drivers.
3# 3#
4 4
5dvb-core-objs = dvbdev.o dmxdev.o dvb_demux.o dvb_filter.o \ 5dvb-core-objs := dvbdev.o dmxdev.o dvb_demux.o dvb_filter.o \
6 dvb_ca_en50221.o dvb_frontend.o \ 6 dvb_ca_en50221.o dvb_frontend.o \
7 dvb_net.o dvb_ringbuffer.o dvb_math.o 7 dvb_net.o dvb_ringbuffer.o dvb_math.o
8 8
9obj-$(CONFIG_DVB_CORE) += dvb-core.o 9obj-$(CONFIG_DVB_CORE) += dvb-core.o
diff --git a/drivers/media/radio/Kconfig b/drivers/media/radio/Kconfig
index de3128a31de..220076b1b95 100644
--- a/drivers/media/radio/Kconfig
+++ b/drivers/media/radio/Kconfig
@@ -350,5 +350,15 @@ config RADIO_ZOLTRIX_PORT
350 help 350 help
351 Enter the I/O port of your Zoltrix radio card. 351 Enter the I/O port of your Zoltrix radio card.
352 352
353endmenu 353config USB_DSBR
354 tristate "D-Link USB FM radio support (EXPERIMENTAL)"
355 depends on USB && VIDEO_V4L1 && EXPERIMENTAL
356 ---help---
357 Say Y here if you want to connect this type of radio to your
358 computer's USB port. Note that the audio is not digital, and
359 you must connect the line out connector to a sound card or a
360 set of speakers.
354 361
362 To compile this driver as a module, choose M here: the
363 module will be called dsbr100.
364endmenu
diff --git a/drivers/media/radio/Makefile b/drivers/media/radio/Makefile
index e95b6805e00..cf55a18e3dd 100644
--- a/drivers/media/radio/Makefile
+++ b/drivers/media/radio/Makefile
@@ -20,5 +20,6 @@ obj-$(CONFIG_RADIO_GEMTEK) += radio-gemtek.o
20obj-$(CONFIG_RADIO_GEMTEK_PCI) += radio-gemtek-pci.o 20obj-$(CONFIG_RADIO_GEMTEK_PCI) += radio-gemtek-pci.o
21obj-$(CONFIG_RADIO_TRUST) += radio-trust.o 21obj-$(CONFIG_RADIO_TRUST) += radio-trust.o
22obj-$(CONFIG_RADIO_MAESTRO) += radio-maestro.o 22obj-$(CONFIG_RADIO_MAESTRO) += radio-maestro.o
23obj-$(CONFIG_USB_DSBR) += dsbr100.o
23 24
24EXTRA_CFLAGS += -Isound 25EXTRA_CFLAGS += -Isound
diff --git a/drivers/media/video/dsbr100.c b/drivers/media/radio/dsbr100.c
index f7e33f9ee8e..f7e33f9ee8e 100644
--- a/drivers/media/video/dsbr100.c
+++ b/drivers/media/radio/dsbr100.c
diff --git a/drivers/media/video/Kconfig b/drivers/media/video/Kconfig
index fe56862d51e..732bf1e7c32 100644
--- a/drivers/media/video/Kconfig
+++ b/drivers/media/video/Kconfig
@@ -449,18 +449,6 @@ source "drivers/media/video/pvrusb2/Kconfig"
449 449
450source "drivers/media/video/em28xx/Kconfig" 450source "drivers/media/video/em28xx/Kconfig"
451 451
452config USB_DSBR
453 tristate "D-Link USB FM radio support (EXPERIMENTAL)"
454 depends on USB && VIDEO_V4L1 && EXPERIMENTAL
455 ---help---
456 Say Y here if you want to connect this type of radio to your
457 computer's USB port. Note that the audio is not digital, and
458 you must connect the line out connector to a sound card or a
459 set of speakers.
460
461 To compile this driver as a module, choose M here: the
462 module will be called dsbr100.
463
464source "drivers/media/video/usbvideo/Kconfig" 452source "drivers/media/video/usbvideo/Kconfig"
465 453
466source "drivers/media/video/et61x251/Kconfig" 454source "drivers/media/video/et61x251/Kconfig"
diff --git a/drivers/media/video/Makefile b/drivers/media/video/Makefile
index 353d61cfac1..e82e511f2a7 100644
--- a/drivers/media/video/Makefile
+++ b/drivers/media/video/Makefile
@@ -77,7 +77,6 @@ obj-$(CONFIG_VIDEO_UPD64083) += upd64083.o
77obj-$(CONFIG_VIDEO_CX2341X) += cx2341x.o 77obj-$(CONFIG_VIDEO_CX2341X) += cx2341x.o
78 78
79obj-$(CONFIG_USB_DABUSB) += dabusb.o 79obj-$(CONFIG_USB_DABUSB) += dabusb.o
80obj-$(CONFIG_USB_DSBR) += dsbr100.o
81obj-$(CONFIG_USB_OV511) += ov511.o 80obj-$(CONFIG_USB_OV511) += ov511.o
82obj-$(CONFIG_USB_SE401) += se401.o 81obj-$(CONFIG_USB_SE401) += se401.o
83obj-$(CONFIG_USB_STV680) += stv680.o 82obj-$(CONFIG_USB_STV680) += stv680.o
@@ -91,6 +90,7 @@ obj-$(CONFIG_USB_ZC0301) += zc0301/
91obj-$(CONFIG_USB_IBMCAM) += usbvideo/ 90obj-$(CONFIG_USB_IBMCAM) += usbvideo/
92obj-$(CONFIG_USB_KONICAWC) += usbvideo/ 91obj-$(CONFIG_USB_KONICAWC) += usbvideo/
93obj-$(CONFIG_USB_VICAM) += usbvideo/ 92obj-$(CONFIG_USB_VICAM) += usbvideo/
93obj-$(CONFIG_USB_QUICKCAM_MESSENGER) += usbvideo/
94 94
95obj-$(CONFIG_VIDEO_VIVI) += vivi.o 95obj-$(CONFIG_VIDEO_VIVI) += vivi.o
96 96
diff --git a/drivers/media/video/compat_ioctl32.c b/drivers/media/video/compat_ioctl32.c
index 9dddff42ec1..b69ee119481 100644
--- a/drivers/media/video/compat_ioctl32.c
+++ b/drivers/media/video/compat_ioctl32.c
@@ -21,7 +21,7 @@
21 21
22#ifdef CONFIG_COMPAT 22#ifdef CONFIG_COMPAT
23 23
24 24#ifdef CONFIG_VIDEO_V4L1_COMPAT
25struct video_tuner32 { 25struct video_tuner32 {
26 compat_int_t tuner; 26 compat_int_t tuner;
27 char name[32]; 27 char name[32];
@@ -107,6 +107,7 @@ struct video_window32 {
107 compat_caddr_t clips; 107 compat_caddr_t clips;
108 compat_int_t clipcount; 108 compat_int_t clipcount;
109}; 109};
110#endif
110 111
111static int native_ioctl(struct file *file, unsigned int cmd, unsigned long arg) 112static int native_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
112{ 113{
@@ -124,6 +125,7 @@ static int native_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
124} 125}
125 126
126 127
128#ifdef CONFIG_VIDEO_V4L1_COMPAT
127/* You get back everything except the clips... */ 129/* You get back everything except the clips... */
128static int put_video_window32(struct video_window *kp, struct video_window32 __user *up) 130static int put_video_window32(struct video_window *kp, struct video_window32 __user *up)
129{ 131{
@@ -138,6 +140,7 @@ static int put_video_window32(struct video_window *kp, struct video_window32 __u
138 return -EFAULT; 140 return -EFAULT;
139 return 0; 141 return 0;
140} 142}
143#endif
141 144
142struct v4l2_clip32 145struct v4l2_clip32
143{ 146{
@@ -490,6 +493,7 @@ static inline int put_v4l2_input(struct v4l2_input *kp, struct v4l2_input __user
490 return 0; 493 return 0;
491} 494}
492 495
496#ifdef CONFIG_VIDEO_V4L1_COMPAT
493struct video_code32 497struct video_code32
494{ 498{
495 char loadwhat[16]; /* name or tag of file being passed */ 499 char loadwhat[16]; /* name or tag of file being passed */
@@ -517,6 +521,8 @@ static inline int microcode32(struct video_code *kp, struct video_code32 __user
517#define VIDIOCSFREQ32 _IOW('v',15, u32) 521#define VIDIOCSFREQ32 _IOW('v',15, u32)
518#define VIDIOCSMICROCODE32 _IOW('v',27, struct video_code32) 522#define VIDIOCSMICROCODE32 _IOW('v',27, struct video_code32)
519 523
524#endif
525
520/* VIDIOC_ENUMINPUT32 is VIDIOC_ENUMINPUT minus 4 bytes of padding alignement */ 526/* VIDIOC_ENUMINPUT32 is VIDIOC_ENUMINPUT minus 4 bytes of padding alignement */
521#define VIDIOC_ENUMINPUT32 VIDIOC_ENUMINPUT - _IOC(0, 0, 0, 4) 527#define VIDIOC_ENUMINPUT32 VIDIOC_ENUMINPUT - _IOC(0, 0, 0, 4)
522#define VIDIOC_G_FMT32 _IOWR ('V', 4, struct v4l2_format32) 528#define VIDIOC_G_FMT32 _IOWR ('V', 4, struct v4l2_format32)
@@ -537,6 +543,7 @@ static inline int microcode32(struct video_code *kp, struct video_code32 __user
537#define VIDIOC_S_INPUT32 _IOWR ('V', 39, compat_int_t) 543#define VIDIOC_S_INPUT32 _IOWR ('V', 39, compat_int_t)
538#define VIDIOC_TRY_FMT32 _IOWR ('V', 64, struct v4l2_format32) 544#define VIDIOC_TRY_FMT32 _IOWR ('V', 64, struct v4l2_format32)
539 545
546#ifdef CONFIG_VIDEO_V4L1_COMPAT
540enum { 547enum {
541 MaxClips = (~0U-sizeof(struct video_window))/sizeof(struct video_clip) 548 MaxClips = (~0U-sizeof(struct video_window))/sizeof(struct video_clip)
542}; 549};
@@ -601,14 +608,17 @@ static int do_set_window(struct file *file, unsigned int cmd, unsigned long arg)
601 608
602 return native_ioctl(file, VIDIOCSWIN, (unsigned long)vw); 609 return native_ioctl(file, VIDIOCSWIN, (unsigned long)vw);
603} 610}
611#endif
604 612
605static int do_video_ioctl(struct file *file, unsigned int cmd, unsigned long arg) 613static int do_video_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
606{ 614{
607 union { 615 union {
616#ifdef CONFIG_VIDEO_V4L1_COMPAT
608 struct video_tuner vt; 617 struct video_tuner vt;
609 struct video_buffer vb; 618 struct video_buffer vb;
610 struct video_window vw; 619 struct video_window vw;
611 struct video_code vc; 620 struct video_code vc;
621#endif
612 struct v4l2_format v2f; 622 struct v4l2_format v2f;
613 struct v4l2_buffer v2b; 623 struct v4l2_buffer v2b;
614 struct v4l2_framebuffer v2fb; 624 struct v4l2_framebuffer v2fb;
@@ -624,6 +634,7 @@ static int do_video_ioctl(struct file *file, unsigned int cmd, unsigned long arg
624 634
625 /* First, convert the command. */ 635 /* First, convert the command. */
626 switch(cmd) { 636 switch(cmd) {
637#ifdef CONFIG_VIDEO_V4L1_COMPAT
627 case VIDIOCGTUNER32: cmd = VIDIOCGTUNER; break; 638 case VIDIOCGTUNER32: cmd = VIDIOCGTUNER; break;
628 case VIDIOCSTUNER32: cmd = VIDIOCSTUNER; break; 639 case VIDIOCSTUNER32: cmd = VIDIOCSTUNER; break;
629 case VIDIOCGWIN32: cmd = VIDIOCGWIN; break; 640 case VIDIOCGWIN32: cmd = VIDIOCGWIN; break;
@@ -631,6 +642,8 @@ static int do_video_ioctl(struct file *file, unsigned int cmd, unsigned long arg
631 case VIDIOCSFBUF32: cmd = VIDIOCSFBUF; break; 642 case VIDIOCSFBUF32: cmd = VIDIOCSFBUF; break;
632 case VIDIOCGFREQ32: cmd = VIDIOCGFREQ; break; 643 case VIDIOCGFREQ32: cmd = VIDIOCGFREQ; break;
633 case VIDIOCSFREQ32: cmd = VIDIOCSFREQ; break; 644 case VIDIOCSFREQ32: cmd = VIDIOCSFREQ; break;
645 case VIDIOCSMICROCODE32: cmd = VIDIOCSMICROCODE; break;
646#endif
634 case VIDIOC_G_FMT32: cmd = VIDIOC_G_FMT; break; 647 case VIDIOC_G_FMT32: cmd = VIDIOC_G_FMT; break;
635 case VIDIOC_S_FMT32: cmd = VIDIOC_S_FMT; break; 648 case VIDIOC_S_FMT32: cmd = VIDIOC_S_FMT; break;
636 case VIDIOC_QUERYBUF32: cmd = VIDIOC_QUERYBUF; break; 649 case VIDIOC_QUERYBUF32: cmd = VIDIOC_QUERYBUF; break;
@@ -647,10 +660,10 @@ static int do_video_ioctl(struct file *file, unsigned int cmd, unsigned long arg
647 case VIDIOC_G_INPUT32: cmd = VIDIOC_G_INPUT; break; 660 case VIDIOC_G_INPUT32: cmd = VIDIOC_G_INPUT; break;
648 case VIDIOC_S_INPUT32: cmd = VIDIOC_S_INPUT; break; 661 case VIDIOC_S_INPUT32: cmd = VIDIOC_S_INPUT; break;
649 case VIDIOC_TRY_FMT32: cmd = VIDIOC_TRY_FMT; break; 662 case VIDIOC_TRY_FMT32: cmd = VIDIOC_TRY_FMT; break;
650 case VIDIOCSMICROCODE32: cmd = VIDIOCSMICROCODE; break;
651 }; 663 };
652 664
653 switch(cmd) { 665 switch(cmd) {
666#ifdef CONFIG_VIDEO_V4L1_COMPAT
654 case VIDIOCSTUNER: 667 case VIDIOCSTUNER:
655 case VIDIOCGTUNER: 668 case VIDIOCGTUNER:
656 err = get_video_tuner32(&karg.vt, up); 669 err = get_video_tuner32(&karg.vt, up);
@@ -664,6 +677,7 @@ static int do_video_ioctl(struct file *file, unsigned int cmd, unsigned long arg
664 break; 677 break;
665 678
666 case VIDIOCSFREQ: 679 case VIDIOCSFREQ:
680#endif
667 case VIDIOC_S_INPUT: 681 case VIDIOC_S_INPUT:
668 case VIDIOC_OVERLAY: 682 case VIDIOC_OVERLAY:
669 case VIDIOC_STREAMON: 683 case VIDIOC_STREAMON:
@@ -717,18 +731,21 @@ static int do_video_ioctl(struct file *file, unsigned int cmd, unsigned long arg
717 compatible_arg = 0; 731 compatible_arg = 0;
718 break; 732 break;
719 733
734#ifdef CONFIG_VIDEO_V4L1_COMPAT
720 case VIDIOCGWIN: 735 case VIDIOCGWIN:
721 case VIDIOCGFBUF: 736 case VIDIOCGFBUF:
722 case VIDIOCGFREQ: 737 case VIDIOCGFREQ:
738#endif
723 case VIDIOC_G_FBUF: 739 case VIDIOC_G_FBUF:
724 case VIDIOC_G_INPUT: 740 case VIDIOC_G_INPUT:
725 compatible_arg = 0; 741 compatible_arg = 0;
742#ifdef CONFIG_VIDEO_V4L1_COMPAT
726 case VIDIOCSMICROCODE: 743 case VIDIOCSMICROCODE:
727 err = microcode32(&karg.vc, up); 744 err = microcode32(&karg.vc, up);
728 compatible_arg = 0; 745 compatible_arg = 0;
729 break; 746 break;
747#endif
730 }; 748 };
731
732 if(err) 749 if(err)
733 goto out; 750 goto out;
734 751
@@ -743,6 +760,7 @@ static int do_video_ioctl(struct file *file, unsigned int cmd, unsigned long arg
743 } 760 }
744 if(err == 0) { 761 if(err == 0) {
745 switch(cmd) { 762 switch(cmd) {
763#ifdef CONFIG_VIDEO_V4L1_COMPAT
746 case VIDIOCGTUNER: 764 case VIDIOCGTUNER:
747 err = put_video_tuner32(&karg.vt, up); 765 err = put_video_tuner32(&karg.vt, up);
748 break; 766 break;
@@ -754,7 +772,7 @@ static int do_video_ioctl(struct file *file, unsigned int cmd, unsigned long arg
754 case VIDIOCGFBUF: 772 case VIDIOCGFBUF:
755 err = put_video_buffer32(&karg.vb, up); 773 err = put_video_buffer32(&karg.vb, up);
756 break; 774 break;
757 775#endif
758 case VIDIOC_G_FBUF: 776 case VIDIOC_G_FBUF:
759 err = put_v4l2_framebuffer32(&karg.v2fb, up); 777 err = put_v4l2_framebuffer32(&karg.v2fb, up);
760 break; 778 break;
@@ -792,7 +810,9 @@ static int do_video_ioctl(struct file *file, unsigned int cmd, unsigned long arg
792 err = put_v4l2_input32(&karg.v2i, up); 810 err = put_v4l2_input32(&karg.v2i, up);
793 break; 811 break;
794 812
813#ifdef CONFIG_VIDEO_V4L1_COMPAT
795 case VIDIOCGFREQ: 814 case VIDIOCGFREQ:
815#endif
796 case VIDIOC_G_INPUT: 816 case VIDIOC_G_INPUT:
797 err = put_user(((u32)karg.vx), (u32 __user *)up); 817 err = put_user(((u32)karg.vx), (u32 __user *)up);
798 break; 818 break;
@@ -810,6 +830,7 @@ long v4l_compat_ioctl32(struct file *file, unsigned int cmd, unsigned long arg)
810 return ret; 830 return ret;
811 831
812 switch (cmd) { 832 switch (cmd) {
833#ifdef CONFIG_VIDEO_V4L1_COMPAT
813 case VIDIOCSWIN32: 834 case VIDIOCSWIN32:
814 ret = do_set_window(file, cmd, arg); 835 ret = do_set_window(file, cmd, arg);
815 break; 836 break;
@@ -820,6 +841,7 @@ long v4l_compat_ioctl32(struct file *file, unsigned int cmd, unsigned long arg)
820 case VIDIOCSFBUF32: 841 case VIDIOCSFBUF32:
821 case VIDIOCGFREQ32: 842 case VIDIOCGFREQ32:
822 case VIDIOCSFREQ32: 843 case VIDIOCSFREQ32:
844#endif
823 case VIDIOC_QUERYCAP: 845 case VIDIOC_QUERYCAP:
824 case VIDIOC_ENUM_FMT: 846 case VIDIOC_ENUM_FMT:
825 case VIDIOC_G_FMT32: 847 case VIDIOC_G_FMT32:
@@ -851,6 +873,7 @@ long v4l_compat_ioctl32(struct file *file, unsigned int cmd, unsigned long arg)
851 ret = do_video_ioctl(file, cmd, arg); 873 ret = do_video_ioctl(file, cmd, arg);
852 break; 874 break;
853 875
876#ifdef CONFIG_VIDEO_V4L1_COMPAT
854 /* Little v, the video4linux ioctls (conflict?) */ 877 /* Little v, the video4linux ioctls (conflict?) */
855 case VIDIOCGCAP: 878 case VIDIOCGCAP:
856 case VIDIOCGCHAN: 879 case VIDIOCGCHAN:
@@ -879,6 +902,7 @@ long v4l_compat_ioctl32(struct file *file, unsigned int cmd, unsigned long arg)
879 case _IOR('v' , BASE_VIDIOCPRIVATE+7, int): 902 case _IOR('v' , BASE_VIDIOCPRIVATE+7, int):
880 ret = native_ioctl(file, cmd, (unsigned long)compat_ptr(arg)); 903 ret = native_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
881 break; 904 break;
905#endif
882 default: 906 default:
883 v4l_print_ioctl("compat_ioctl32", cmd); 907 v4l_print_ioctl("compat_ioctl32", cmd);
884 } 908 }
diff --git a/drivers/media/video/cx25840/cx25840-core.c b/drivers/media/video/cx25840/cx25840-core.c
index 5c2036b40ea..7bb7589a07c 100644
--- a/drivers/media/video/cx25840/cx25840-core.c
+++ b/drivers/media/video/cx25840/cx25840-core.c
@@ -104,8 +104,8 @@ u32 cx25840_read4(struct i2c_client * client, u16 addr)
104 if (i2c_master_recv(client, buffer, 4) < 4) 104 if (i2c_master_recv(client, buffer, 4) < 4)
105 return 0; 105 return 0;
106 106
107 return (buffer[0] << 24) | (buffer[1] << 16) | 107 return (buffer[3] << 24) | (buffer[2] << 16) |
108 (buffer[2] << 8) | buffer[3]; 108 (buffer[1] << 8) | buffer[0];
109} 109}
110 110
111int cx25840_and_or(struct i2c_client *client, u16 addr, unsigned and_mask, 111int cx25840_and_or(struct i2c_client *client, u16 addr, unsigned and_mask,
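[Editorial note, not part of the patch] The cx25840_read4() fix above reassembles the four bytes returned by i2c_master_recv() least-significant byte first, i.e. as a little-endian 32-bit value. Equivalent forms, as a sketch only (buffer is the driver's local u8 array; the helper form additionally assumes suitable alignment):

	/* byte-by-byte form of the corrected read order */
	u32 value = buffer[0] | (buffer[1] << 8) | (buffer[2] << 16) | (buffer[3] << 24);

	/* or with the byte-order helpers, if buffer is suitably aligned */
	u32 value2 = le32_to_cpu(*(__le32 *)buffer);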
diff --git a/drivers/media/video/cx88/cx88-video.c b/drivers/media/video/cx88/cx88-video.c
index 547cdbdb644..94c92bacc34 100644
--- a/drivers/media/video/cx88/cx88-video.c
+++ b/drivers/media/video/cx88/cx88-video.c
@@ -1225,7 +1225,7 @@ static int video_do_ioctl(struct inode *inode, struct file *file,
1225 struct v4l2_format *f = arg; 1225 struct v4l2_format *f = arg;
1226 return cx8800_try_fmt(dev,fh,f); 1226 return cx8800_try_fmt(dev,fh,f);
1227 } 1227 }
1228#ifdef CONFIG_V4L1_COMPAT 1228#ifdef CONFIG_VIDEO_V4L1_COMPAT
1229 /* --- streaming capture ------------------------------------- */ 1229 /* --- streaming capture ------------------------------------- */
1230 case VIDIOCGMBUF: 1230 case VIDIOCGMBUF:
1231 { 1231 {
@@ -1584,7 +1584,7 @@ static int radio_do_ioctl(struct inode *inode, struct file *file,
1584 *id = 0; 1584 *id = 0;
1585 return 0; 1585 return 0;
1586 } 1586 }
1587#ifdef CONFIG_V4L1_COMPAT 1587#ifdef CONFIG_VIDEO_V4L1_COMPAT
1588 case VIDIOCSTUNER: 1588 case VIDIOCSTUNER:
1589 { 1589 {
1590 struct video_tuner *v = arg; 1590 struct video_tuner *v = arg;
diff --git a/drivers/media/video/msp3400-kthreads.c b/drivers/media/video/msp3400-kthreads.c
index f2fd9195b3a..ed02ff81138 100644
--- a/drivers/media/video/msp3400-kthreads.c
+++ b/drivers/media/video/msp3400-kthreads.c
@@ -961,10 +961,10 @@ int msp34xxg_thread(void *data)
961 /* setup the chip*/ 961 /* setup the chip*/
962 msp34xxg_reset(client); 962 msp34xxg_reset(client);
963 state->std = state->radio ? 0x40 : msp_standard; 963 state->std = state->radio ? 0x40 : msp_standard;
964 if (state->std != 1)
965 goto unmute;
966 /* start autodetect */ 964 /* start autodetect */
967 msp_write_dem(client, 0x20, state->std); 965 msp_write_dem(client, 0x20, state->std);
966 if (state->std != 1)
967 goto unmute;
968 968
969 /* watch autodetect */ 969 /* watch autodetect */
970 v4l_dbg(1, msp_debug, client, "started autodetect, waiting for result\n"); 970 v4l_dbg(1, msp_debug, client, "started autodetect, waiting for result\n");
diff --git a/drivers/media/video/pwc/Kconfig b/drivers/media/video/pwc/Kconfig
index 697145e0bf1..8fdf7101d3b 100644
--- a/drivers/media/video/pwc/Kconfig
+++ b/drivers/media/video/pwc/Kconfig
@@ -30,7 +30,7 @@ config USB_PWC
30 30
31config USB_PWC_DEBUG 31config USB_PWC_DEBUG
32 bool "USB Philips Cameras verbose debug" 32 bool "USB Philips Cameras verbose debug"
33 depends USB_PWC 33 depends on USB_PWC
34 help 34 help
35 Say Y here in order to have the pwc driver generate verbose debugging 35 Say Y here in order to have the pwc driver generate verbose debugging
36 messages. 36 messages.
diff --git a/drivers/media/video/pwc/pwc-if.c b/drivers/media/video/pwc/pwc-if.c
index 47d0d83a026..d4703944df9 100644
--- a/drivers/media/video/pwc/pwc-if.c
+++ b/drivers/media/video/pwc/pwc-if.c
@@ -160,6 +160,7 @@ static struct file_operations pwc_fops = {
160 .poll = pwc_video_poll, 160 .poll = pwc_video_poll,
161 .mmap = pwc_video_mmap, 161 .mmap = pwc_video_mmap,
162 .ioctl = pwc_video_ioctl, 162 .ioctl = pwc_video_ioctl,
163 .compat_ioctl = v4l_compat_ioctl32,
163 .llseek = no_llseek, 164 .llseek = no_llseek,
164}; 165};
165static struct video_device pwc_template = { 166static struct video_device pwc_template = {
diff --git a/drivers/media/video/saa7134/saa7134-video.c b/drivers/media/video/saa7134/saa7134-video.c
index 8656f2400e1..2c171af9a9f 100644
--- a/drivers/media/video/saa7134/saa7134-video.c
+++ b/drivers/media/video/saa7134/saa7134-video.c
@@ -2087,7 +2087,7 @@ static int video_do_ioctl(struct inode *inode, struct file *file,
2087 struct v4l2_format *f = arg; 2087 struct v4l2_format *f = arg;
2088 return saa7134_try_fmt(dev,fh,f); 2088 return saa7134_try_fmt(dev,fh,f);
2089 } 2089 }
2090#ifdef CONFIG_V4L1_COMPAT 2090#ifdef CONFIG_VIDEO_V4L1_COMPAT
2091 case VIDIOCGMBUF: 2091 case VIDIOCGMBUF:
2092 { 2092 {
2093 struct video_mbuf *mbuf = arg; 2093 struct video_mbuf *mbuf = arg;
diff --git a/drivers/media/video/tuner-types.c b/drivers/media/video/tuner-types.c
index a167e17c6dc..d7eadc2c298 100644
--- a/drivers/media/video/tuner-types.c
+++ b/drivers/media/video/tuner-types.c
@@ -1027,10 +1027,11 @@ static struct tuner_params tuner_tnf_5335mf_params[] = {
1027/* 70-79 */ 1027/* 70-79 */
1028/* ------------ TUNER_SAMSUNG_TCPN_2121P30A - Samsung NTSC ------------ */ 1028/* ------------ TUNER_SAMSUNG_TCPN_2121P30A - Samsung NTSC ------------ */
1029 1029
1030/* '+ 4' turns on the Low Noise Amplifier */
1030static struct tuner_range tuner_samsung_tcpn_2121p30a_ntsc_ranges[] = { 1031static struct tuner_range tuner_samsung_tcpn_2121p30a_ntsc_ranges[] = {
1031 { 16 * 130.00 /*MHz*/, 0xce, 0x01, }, 1032 { 16 * 130.00 /*MHz*/, 0xce, 0x01 + 4, },
1032 { 16 * 364.50 /*MHz*/, 0xce, 0x02, }, 1033 { 16 * 364.50 /*MHz*/, 0xce, 0x02 + 4, },
1033 { 16 * 999.99 , 0xce, 0x08, }, 1034 { 16 * 999.99 , 0xce, 0x08 + 4, },
1034}; 1035};
1035 1036
1036static struct tuner_params tuner_samsung_tcpn_2121p30a_params[] = { 1037static struct tuner_params tuner_samsung_tcpn_2121p30a_params[] = {
@@ -1060,10 +1061,11 @@ static struct tuner_params tuner_thomson_fe6600_params[] = {
1060 1061
1061/* ------------ TUNER_SAMSUNG_TCPG_6121P30A - Samsung PAL ------------ */ 1062/* ------------ TUNER_SAMSUNG_TCPG_6121P30A - Samsung PAL ------------ */
1062 1063
1064/* '+ 4' turns on the Low Noise Amplifier */
1063static struct tuner_range tuner_samsung_tcpg_6121p30a_pal_ranges[] = { 1065static struct tuner_range tuner_samsung_tcpg_6121p30a_pal_ranges[] = {
1064 { 16 * 146.25 /*MHz*/, 0xce, 0x01, }, 1066 { 16 * 146.25 /*MHz*/, 0xce, 0x01 + 4, },
1065 { 16 * 428.50 /*MHz*/, 0xce, 0x02, }, 1067 { 16 * 428.50 /*MHz*/, 0xce, 0x02 + 4, },
1066 { 16 * 999.99 , 0xce, 0x08, }, 1068 { 16 * 999.99 , 0xce, 0x08 + 4, },
1067}; 1069};
1068 1070
1069static struct tuner_params tuner_samsung_tcpg_6121p30a_params[] = { 1071static struct tuner_params tuner_samsung_tcpg_6121p30a_params[] = {
diff --git a/drivers/media/video/v4l1-compat.c b/drivers/media/video/v4l1-compat.c
index d83a2c84d23..d7c3fcbc80f 100644
--- a/drivers/media/video/v4l1-compat.c
+++ b/drivers/media/video/v4l1-compat.c
@@ -599,6 +599,10 @@ v4l_compat_translate_ioctl(struct inode *inode,
599 dprintk("VIDIOCGPICT / VIDIOC_G_FMT: %d\n",err); 599 dprintk("VIDIOCGPICT / VIDIOC_G_FMT: %d\n",err);
600 break; 600 break;
601 } 601 }
602
603 pict->depth = ((fmt2->fmt.pix.bytesperline<<3)
604 + (fmt2->fmt.pix.width-1) )
605 /fmt2->fmt.pix.width;
602 pict->palette = pixelformat_to_palette( 606 pict->palette = pixelformat_to_palette(
603 fmt2->fmt.pix.pixelformat); 607 fmt2->fmt.pix.pixelformat);
604 break; 608 break;
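[Editorial note, not part of the patch] The depth computation added above is a round-up division: bits per line divided by pixels per line, rounded up so a partially used trailing byte still counts toward the depth. A small sketch of the same rounding as a helper, with a worked number (names assumed):

	/* sketch of the rounding used in the hunk above */
	static inline unsigned int bits_per_pixel(unsigned int bytesperline,
						  unsigned int width)
	{
		return (bytesperline * 8 + width - 1) / width;
	}

	/* e.g. a 320-pixel-wide RGB565 line: bytesperline = 640,
	 * (640 * 8 + 319) / 320 = 16 bits per pixel */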
diff --git a/drivers/media/video/v4l2-common.c b/drivers/media/video/v4l2-common.c
index 2ecbeffb559..8d972ffdaf9 100644
--- a/drivers/media/video/v4l2-common.c
+++ b/drivers/media/video/v4l2-common.c
@@ -202,7 +202,7 @@ static char *v4l2_memory_names[] = {
202/* ------------------------------------------------------------------ */ 202/* ------------------------------------------------------------------ */
203/* debug help functions */ 203/* debug help functions */
204 204
205#ifdef CONFIG_V4L1_COMPAT 205#ifdef CONFIG_VIDEO_V4L1_COMPAT
206static const char *v4l1_ioctls[] = { 206static const char *v4l1_ioctls[] = {
207 [_IOC_NR(VIDIOCGCAP)] = "VIDIOCGCAP", 207 [_IOC_NR(VIDIOCGCAP)] = "VIDIOCGCAP",
208 [_IOC_NR(VIDIOCGCHAN)] = "VIDIOCGCHAN", 208 [_IOC_NR(VIDIOCGCHAN)] = "VIDIOCGCHAN",
@@ -301,7 +301,7 @@ static const char *v4l2_ioctls[] = {
301#define V4L2_IOCTLS ARRAY_SIZE(v4l2_ioctls) 301#define V4L2_IOCTLS ARRAY_SIZE(v4l2_ioctls)
302 302
303static const char *v4l2_int_ioctls[] = { 303static const char *v4l2_int_ioctls[] = {
304#ifdef CONFIG_V4L1_COMPAT 304#ifdef CONFIG_VIDEO_V4L1_COMPAT
305 [_IOC_NR(DECODER_GET_CAPABILITIES)] = "DECODER_GET_CAPABILITIES", 305 [_IOC_NR(DECODER_GET_CAPABILITIES)] = "DECODER_GET_CAPABILITIES",
306 [_IOC_NR(DECODER_GET_STATUS)] = "DECODER_GET_STATUS", 306 [_IOC_NR(DECODER_GET_STATUS)] = "DECODER_GET_STATUS",
307 [_IOC_NR(DECODER_SET_NORM)] = "DECODER_SET_NORM", 307 [_IOC_NR(DECODER_SET_NORM)] = "DECODER_SET_NORM",
@@ -367,7 +367,7 @@ void v4l_printk_ioctl(unsigned int cmd)
367 (_IOC_NR(cmd) < V4L2_INT_IOCTLS) ? 367 (_IOC_NR(cmd) < V4L2_INT_IOCTLS) ?
368 v4l2_int_ioctls[_IOC_NR(cmd)] : "UNKNOWN", dir, cmd); 368 v4l2_int_ioctls[_IOC_NR(cmd)] : "UNKNOWN", dir, cmd);
369 break; 369 break;
370#ifdef CONFIG_V4L1_COMPAT 370#ifdef CONFIG_VIDEO_V4L1_COMPAT
371 case 'v': 371 case 'v':
372 printk("v4l1 ioctl %s, dir=%s (0x%08x)\n", 372 printk("v4l1 ioctl %s, dir=%s (0x%08x)\n",
373 (_IOC_NR(cmd) < V4L1_IOCTLS) ? 373 (_IOC_NR(cmd) < V4L1_IOCTLS) ?
diff --git a/drivers/media/video/videodev.c b/drivers/media/video/videodev.c
index 0fc90cd393f..88bf2af2a0e 100644
--- a/drivers/media/video/videodev.c
+++ b/drivers/media/video/videodev.c
@@ -760,7 +760,7 @@ static int __video_do_ioctl(struct inode *inode, struct file *file,
760 ret=vfd->vidioc_overlay(file, fh, *i); 760 ret=vfd->vidioc_overlay(file, fh, *i);
761 break; 761 break;
762 } 762 }
763#ifdef CONFIG_V4L1_COMPAT 763#ifdef CONFIG_VIDEO_V4L1_COMPAT
764 /* --- streaming capture ------------------------------------- */ 764 /* --- streaming capture ------------------------------------- */
765 case VIDIOCGMBUF: 765 case VIDIOCGMBUF:
766 { 766 {
diff --git a/drivers/media/video/vivi.c b/drivers/media/video/vivi.c
index 38bd0c1018c..841884af0cc 100644
--- a/drivers/media/video/vivi.c
+++ b/drivers/media/video/vivi.c
@@ -986,7 +986,7 @@ static int vidioc_dqbuf (struct file *file, void *priv, struct v4l2_buffer *p)
986 file->f_flags & O_NONBLOCK)); 986 file->f_flags & O_NONBLOCK));
987} 987}
988 988
989#ifdef CONFIG_V4L1_COMPAT 989#ifdef CONFIG_VIDEO_V4L1_COMPAT
990static int vidiocgmbuf (struct file *file, void *priv, struct video_mbuf *mbuf) 990static int vidiocgmbuf (struct file *file, void *priv, struct video_mbuf *mbuf)
991{ 991{
992 struct vivi_fh *fh=priv; 992 struct vivi_fh *fh=priv;
@@ -1328,7 +1328,7 @@ static struct video_device vivi = {
1328 .vidioc_s_ctrl = vidioc_s_ctrl, 1328 .vidioc_s_ctrl = vidioc_s_ctrl,
1329 .vidioc_streamon = vidioc_streamon, 1329 .vidioc_streamon = vidioc_streamon,
1330 .vidioc_streamoff = vidioc_streamoff, 1330 .vidioc_streamoff = vidioc_streamoff,
1331#ifdef CONFIG_V4L1_COMPAT 1331#ifdef CONFIG_VIDEO_V4L1_COMPAT
1332 .vidiocgmbuf = vidiocgmbuf, 1332 .vidiocgmbuf = vidiocgmbuf,
1333#endif 1333#endif
1334 .tvnorms = tvnorms, 1334 .tvnorms = tvnorms,
diff --git a/drivers/message/fusion/mptbase.h b/drivers/message/fusion/mptbase.h
index d4cb144ab40..c537d71c18e 100644
--- a/drivers/message/fusion/mptbase.h
+++ b/drivers/message/fusion/mptbase.h
@@ -640,7 +640,6 @@ typedef struct _MPT_ADAPTER
640 struct work_struct fc_setup_reset_work; 640 struct work_struct fc_setup_reset_work;
641 struct list_head fc_rports; 641 struct list_head fc_rports;
642 spinlock_t fc_rescan_work_lock; 642 spinlock_t fc_rescan_work_lock;
643 int fc_rescan_work_count;
644 struct work_struct fc_rescan_work; 643 struct work_struct fc_rescan_work;
645 char fc_rescan_work_q_name[KOBJ_NAME_LEN]; 644 char fc_rescan_work_q_name[KOBJ_NAME_LEN];
646 struct workqueue_struct *fc_rescan_work_q; 645 struct workqueue_struct *fc_rescan_work_q;
diff --git a/drivers/message/fusion/mptfc.c b/drivers/message/fusion/mptfc.c
index 90da7d63b08..85696f34c31 100644
--- a/drivers/message/fusion/mptfc.c
+++ b/drivers/message/fusion/mptfc.c
@@ -669,7 +669,10 @@ mptfc_GetFcPortPage0(MPT_ADAPTER *ioc, int portnum)
669 * if still doing discovery, 669 * if still doing discovery,
670 * hang loose a while until finished 670 * hang loose a while until finished
671 */ 671 */
672 if (pp0dest->PortState == MPI_FCPORTPAGE0_PORTSTATE_UNKNOWN) { 672 if ((pp0dest->PortState == MPI_FCPORTPAGE0_PORTSTATE_UNKNOWN) ||
673 (pp0dest->PortState == MPI_FCPORTPAGE0_PORTSTATE_ONLINE &&
674 (pp0dest->Flags & MPI_FCPORTPAGE0_FLAGS_ATTACH_TYPE_MASK)
675 == MPI_FCPORTPAGE0_FLAGS_ATTACH_NO_INIT)) {
673 if (count-- > 0) { 676 if (count-- > 0) {
674 msleep(100); 677 msleep(100);
675 goto try_again; 678 goto try_again;
@@ -895,59 +898,45 @@ mptfc_rescan_devices(void *arg)
895{ 898{
896 MPT_ADAPTER *ioc = (MPT_ADAPTER *)arg; 899 MPT_ADAPTER *ioc = (MPT_ADAPTER *)arg;
897 int ii; 900 int ii;
898 int work_to_do;
899 u64 pn; 901 u64 pn;
900 unsigned long flags;
901 struct mptfc_rport_info *ri; 902 struct mptfc_rport_info *ri;
902 903
903 do { 904 /* start by tagging all ports as missing */
904 /* start by tagging all ports as missing */ 905 list_for_each_entry(ri, &ioc->fc_rports, list) {
905 list_for_each_entry(ri, &ioc->fc_rports, list) { 906 if (ri->flags & MPT_RPORT_INFO_FLAGS_REGISTERED) {
906 if (ri->flags & MPT_RPORT_INFO_FLAGS_REGISTERED) { 907 ri->flags |= MPT_RPORT_INFO_FLAGS_MISSING;
907 ri->flags |= MPT_RPORT_INFO_FLAGS_MISSING;
908 }
909 } 908 }
909 }
910 910
911 /* 911 /*
912 * now rescan devices known to adapter, 912 * now rescan devices known to adapter,
913 * will reregister existing rports 913 * will reregister existing rports
914 */ 914 */
915 for (ii=0; ii < ioc->facts.NumberOfPorts; ii++) { 915 for (ii=0; ii < ioc->facts.NumberOfPorts; ii++) {
916 (void) mptfc_GetFcPortPage0(ioc, ii); 916 (void) mptfc_GetFcPortPage0(ioc, ii);
917 mptfc_init_host_attr(ioc,ii); /* refresh */ 917 mptfc_init_host_attr(ioc, ii); /* refresh */
918 mptfc_GetFcDevPage0(ioc,ii,mptfc_register_dev); 918 mptfc_GetFcDevPage0(ioc, ii, mptfc_register_dev);
919 } 919 }
920 920
921 /* delete devices still missing */ 921 /* delete devices still missing */
922 list_for_each_entry(ri, &ioc->fc_rports, list) { 922 list_for_each_entry(ri, &ioc->fc_rports, list) {
923 /* if newly missing, delete it */ 923 /* if newly missing, delete it */
924 if (ri->flags & MPT_RPORT_INFO_FLAGS_MISSING) { 924 if (ri->flags & MPT_RPORT_INFO_FLAGS_MISSING) {
925 925
926 ri->flags &= ~(MPT_RPORT_INFO_FLAGS_REGISTERED| 926 ri->flags &= ~(MPT_RPORT_INFO_FLAGS_REGISTERED|
927 MPT_RPORT_INFO_FLAGS_MISSING); 927 MPT_RPORT_INFO_FLAGS_MISSING);
928 fc_remote_port_delete(ri->rport); /* won't sleep */ 928 fc_remote_port_delete(ri->rport); /* won't sleep */
929 ri->rport = NULL; 929 ri->rport = NULL;
930 930
931 pn = (u64)ri->pg0.WWPN.High << 32 | 931 pn = (u64)ri->pg0.WWPN.High << 32 |
932 (u64)ri->pg0.WWPN.Low; 932 (u64)ri->pg0.WWPN.Low;
933 dfcprintk ((MYIOC_s_INFO_FMT 933 dfcprintk ((MYIOC_s_INFO_FMT
934 "mptfc_rescan.%d: %llx deleted\n", 934 "mptfc_rescan.%d: %llx deleted\n",
935 ioc->name, 935 ioc->name,
936 ioc->sh->host_no, 936 ioc->sh->host_no,
937 (unsigned long long)pn)); 937 (unsigned long long)pn));
938 }
939 } 938 }
940 939 }
941 /*
942 * allow multiple passes as target state
943 * might have changed during scan
944 */
945 spin_lock_irqsave(&ioc->fc_rescan_work_lock, flags);
946 if (ioc->fc_rescan_work_count > 2) /* only need one more */
947 ioc->fc_rescan_work_count = 2;
948 work_to_do = --ioc->fc_rescan_work_count;
949 spin_unlock_irqrestore(&ioc->fc_rescan_work_lock, flags);
950 } while (work_to_do);
951} 940}
952 941
953static int 942static int
@@ -1159,7 +1148,6 @@ mptfc_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1159 * by doing it via the workqueue, some locking is eliminated 1148 * by doing it via the workqueue, some locking is eliminated
1160 */ 1149 */
1161 1150
1162 ioc->fc_rescan_work_count = 1;
1163 queue_work(ioc->fc_rescan_work_q, &ioc->fc_rescan_work); 1151 queue_work(ioc->fc_rescan_work_q, &ioc->fc_rescan_work);
1164 flush_workqueue(ioc->fc_rescan_work_q); 1152 flush_workqueue(ioc->fc_rescan_work_q);
1165 1153
@@ -1202,10 +1190,8 @@ mptfc_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
1202 case MPI_EVENT_RESCAN: 1190 case MPI_EVENT_RESCAN:
1203 spin_lock_irqsave(&ioc->fc_rescan_work_lock, flags); 1191 spin_lock_irqsave(&ioc->fc_rescan_work_lock, flags);
1204 if (ioc->fc_rescan_work_q) { 1192 if (ioc->fc_rescan_work_q) {
1205 if (ioc->fc_rescan_work_count++ == 0) { 1193 queue_work(ioc->fc_rescan_work_q,
1206 queue_work(ioc->fc_rescan_work_q, 1194 &ioc->fc_rescan_work);
1207 &ioc->fc_rescan_work);
1208 }
1209 } 1195 }
1210 spin_unlock_irqrestore(&ioc->fc_rescan_work_lock, flags); 1196 spin_unlock_irqrestore(&ioc->fc_rescan_work_lock, flags);
1211 break; 1197 break;
@@ -1248,10 +1234,8 @@ mptfc_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
1248 mptfc_SetFcPortPage1_defaults(ioc); 1234 mptfc_SetFcPortPage1_defaults(ioc);
1249 spin_lock_irqsave(&ioc->fc_rescan_work_lock, flags); 1235 spin_lock_irqsave(&ioc->fc_rescan_work_lock, flags);
1250 if (ioc->fc_rescan_work_q) { 1236 if (ioc->fc_rescan_work_q) {
1251 if (ioc->fc_rescan_work_count++ == 0) { 1237 queue_work(ioc->fc_rescan_work_q,
1252 queue_work(ioc->fc_rescan_work_q, 1238 &ioc->fc_rescan_work);
1253 &ioc->fc_rescan_work);
1254 }
1255 } 1239 }
1256 spin_unlock_irqrestore(&ioc->fc_rescan_work_lock, flags); 1240 spin_unlock_irqrestore(&ioc->fc_rescan_work_lock, flags);
1257 } 1241 }
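Note on the mptfc changes above: fc_rescan_work_count and the do/while loop are dropped because the event and reset paths now simply queue fc_rescan_work, and a work item that is already pending is not queued a second time, so back-to-back rescan requests coalesce on their own. A user-space sketch of that coalescing behaviour, assuming standard workqueue semantics (illustrative, not the driver source):

#include <stdio.h>
#include <stdbool.h>

static bool pending;     /* stands in for the work item's PENDING bit */
static int  runs;

static bool queue_rescan(void)        /* mirrors queue_work(): no-op if already pending */
{
	if (pending)
		return false;
	pending = true;
	return true;
}

static void run_rescan(void)          /* mirrors the workqueue thread running the item */
{
	pending = false;
	runs++;
}

int main(void)
{
	queue_rescan();
	queue_rescan();                   /* second request coalesces with the first */
	run_rescan();
	printf("rescan ran %d time(s)\n", runs);   /* prints 1 */
	return 0;
}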
diff --git a/drivers/mmc/mmc_queue.c b/drivers/mmc/mmc_queue.c
index 0b9682e9a35..74f8cdeeff0 100644
--- a/drivers/mmc/mmc_queue.c
+++ b/drivers/mmc/mmc_queue.c
@@ -79,7 +79,8 @@ static int mmc_queue_thread(void *d)
79 spin_lock_irq(q->queue_lock); 79 spin_lock_irq(q->queue_lock);
80 set_current_state(TASK_INTERRUPTIBLE); 80 set_current_state(TASK_INTERRUPTIBLE);
81 if (!blk_queue_plugged(q)) 81 if (!blk_queue_plugged(q))
82 mq->req = req = elv_next_request(q); 82 req = elv_next_request(q);
83 mq->req = req;
83 spin_unlock_irq(q->queue_lock); 84 spin_unlock_irq(q->queue_lock);
84 85
85 if (!req) { 86 if (!req) {
diff --git a/drivers/mmc/wbsd.c b/drivers/mmc/wbsd.c
index 8a30ef3ae41..c351c6d1a18 100644
--- a/drivers/mmc/wbsd.c
+++ b/drivers/mmc/wbsd.c
@@ -41,7 +41,7 @@
41#include "wbsd.h" 41#include "wbsd.h"
42 42
43#define DRIVER_NAME "wbsd" 43#define DRIVER_NAME "wbsd"
44#define DRIVER_VERSION "1.5" 44#define DRIVER_VERSION "1.6"
45 45
46#define DBG(x...) \ 46#define DBG(x...) \
47 pr_debug(DRIVER_NAME ": " x) 47 pr_debug(DRIVER_NAME ": " x)
@@ -1439,13 +1439,13 @@ static int __devinit wbsd_scan(struct wbsd_host *host)
1439 1439
1440static int __devinit wbsd_request_region(struct wbsd_host *host, int base) 1440static int __devinit wbsd_request_region(struct wbsd_host *host, int base)
1441{ 1441{
1442 if (io & 0x7) 1442 if (base & 0x7)
1443 return -EINVAL; 1443 return -EINVAL;
1444 1444
1445 if (!request_region(base, 8, DRIVER_NAME)) 1445 if (!request_region(base, 8, DRIVER_NAME))
1446 return -EIO; 1446 return -EIO;
1447 1447
1448 host->base = io; 1448 host->base = base;
1449 1449
1450 return 0; 1450 return 0;
1451} 1451}
@@ -1773,7 +1773,7 @@ static int __devinit wbsd_init(struct device *dev, int base, int irq, int dma,
1773 /* 1773 /*
1774 * Request resources. 1774 * Request resources.
1775 */ 1775 */
1776 ret = wbsd_request_resources(host, io, irq, dma); 1776 ret = wbsd_request_resources(host, base, irq, dma);
1777 if (ret) { 1777 if (ret) {
1778 wbsd_release_resources(host); 1778 wbsd_release_resources(host);
1779 wbsd_free_mmc(dev); 1779 wbsd_free_mmc(dev);
@@ -1861,6 +1861,7 @@ static void __devexit wbsd_shutdown(struct device *dev, int pnp)
1861 1861
1862static int __devinit wbsd_probe(struct platform_device *dev) 1862static int __devinit wbsd_probe(struct platform_device *dev)
1863{ 1863{
1864 /* Use the module parameters for resources */
1864 return wbsd_init(&dev->dev, io, irq, dma, 0); 1865 return wbsd_init(&dev->dev, io, irq, dma, 0);
1865} 1866}
1866 1867
diff --git a/drivers/mtd/nand/ams-delta.c b/drivers/mtd/nand/ams-delta.c
index d7897dc6b3c..a0ba07c36ee 100644
--- a/drivers/mtd/nand/ams-delta.c
+++ b/drivers/mtd/nand/ams-delta.c
@@ -130,11 +130,13 @@ static void ams_delta_hwcontrol(struct mtd_info *mtd, int cmd,
130 if (ctrl & NAND_CTRL_CHANGE) { 130 if (ctrl & NAND_CTRL_CHANGE) {
131 unsigned long bits; 131 unsigned long bits;
132 132
133 bits = (~ctrl & NAND_NCE) << 2; 133 bits = (~ctrl & NAND_NCE) ? AMS_DELTA_LATCH2_NAND_NCE : 0;
134 bits |= (ctrl & NAND_CLE) << 7; 134 bits |= (ctrl & NAND_CLE) ? AMS_DELTA_LATCH2_NAND_CLE : 0;
135 bits |= (ctrl & NAND_ALE) << 6; 135 bits |= (ctrl & NAND_ALE) ? AMS_DELTA_LATCH2_NAND_ALE : 0;
136 136
137 ams_delta_latch2_write(0xC2, bits); 137 ams_delta_latch2_write(AMS_DELTA_LATCH2_NAND_CLE |
138 AMS_DELTA_LATCH2_NAND_ALE |
139 AMS_DELTA_LATCH2_NAND_NCE, bits);
138 } 140 }
139 141
140 if (cmd != NAND_CMD_NONE) 142 if (cmd != NAND_CMD_NONE)
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 62b861304e0..c8cbc00243f 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -1093,9 +1093,10 @@ static int nand_read(struct mtd_info *mtd, loff_t from, size_t len,
1093 1093
1094 ret = nand_do_read_ops(mtd, from, &chip->ops); 1094 ret = nand_do_read_ops(mtd, from, &chip->ops);
1095 1095
1096 *retlen = chip->ops.retlen;
1097
1096 nand_release_device(mtd); 1098 nand_release_device(mtd);
1097 1099
1098 *retlen = chip->ops.retlen;
1099 return ret; 1100 return ret;
1100} 1101}
1101 1102
@@ -1691,9 +1692,10 @@ static int nand_write(struct mtd_info *mtd, loff_t to, size_t len,
1691 1692
1692 ret = nand_do_write_ops(mtd, to, &chip->ops); 1693 ret = nand_do_write_ops(mtd, to, &chip->ops);
1693 1694
1695 *retlen = chip->ops.retlen;
1696
1694 nand_release_device(mtd); 1697 nand_release_device(mtd);
1695 1698
1696 *retlen = chip->ops.retlen;
1697 return ret; 1699 return ret;
1698} 1700}
1699 1701
diff --git a/drivers/net/3c515.c b/drivers/net/3c515.c
index 4532b17e40e..aedfddf20cb 100644
--- a/drivers/net/3c515.c
+++ b/drivers/net/3c515.c
@@ -1003,7 +1003,8 @@ static int corkscrew_start_xmit(struct sk_buff *skb,
1003 /* Calculate the next Tx descriptor entry. */ 1003 /* Calculate the next Tx descriptor entry. */
1004 int entry = vp->cur_tx % TX_RING_SIZE; 1004 int entry = vp->cur_tx % TX_RING_SIZE;
1005 struct boom_tx_desc *prev_entry; 1005 struct boom_tx_desc *prev_entry;
1006 unsigned long flags, i; 1006 unsigned long flags;
1007 int i;
1007 1008
1008 if (vp->tx_full) /* No room to transmit with */ 1009 if (vp->tx_full) /* No room to transmit with */
1009 return 1; 1010 return 1;
diff --git a/drivers/net/82596.c b/drivers/net/82596.c
index 7e2ca957146..257d3bce399 100644
--- a/drivers/net/82596.c
+++ b/drivers/net/82596.c
@@ -899,7 +899,7 @@ memory_squeeze:
899} 899}
900 900
901 901
902static inline void i596_cleanup_cmd(struct net_device *dev, struct i596_private *lp) 902static void i596_cleanup_cmd(struct net_device *dev, struct i596_private *lp)
903{ 903{
904 struct i596_cmd *ptr; 904 struct i596_cmd *ptr;
905 905
@@ -932,7 +932,8 @@ static inline void i596_cleanup_cmd(struct net_device *dev, struct i596_private
932 lp->scb.cmd = I596_NULL; 932 lp->scb.cmd = I596_NULL;
933} 933}
934 934
935static inline void i596_reset(struct net_device *dev, struct i596_private *lp, int ioaddr) 935static void i596_reset(struct net_device *dev, struct i596_private *lp,
936 int ioaddr)
936{ 937{
937 unsigned long flags; 938 unsigned long flags;
938 939
@@ -1578,7 +1579,7 @@ static int debug = -1;
1578module_param(debug, int, 0); 1579module_param(debug, int, 0);
1579MODULE_PARM_DESC(debug, "i82596 debug mask"); 1580MODULE_PARM_DESC(debug, "i82596 debug mask");
1580 1581
1581int init_module(void) 1582int __init init_module(void)
1582{ 1583{
1583 if (debug >= 0) 1584 if (debug >= 0)
1584 i596_debug = debug; 1585 i596_debug = debug;
@@ -1588,7 +1589,7 @@ int init_module(void)
1588 return 0; 1589 return 0;
1589} 1590}
1590 1591
1591void cleanup_module(void) 1592void __exit cleanup_module(void)
1592{ 1593{
1593 unregister_netdev(dev_82596); 1594 unregister_netdev(dev_82596);
1594#ifdef __mc68000__ 1595#ifdef __mc68000__
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 39189903e35..30b3671d833 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -1724,6 +1724,20 @@ config VIA_RHINE_MMIO
1724 1724
1725 If unsure, say Y. 1725 If unsure, say Y.
1726 1726
1727config VIA_RHINE_NAPI
1728 bool "Use Rx Polling (NAPI)"
1729 depends on VIA_RHINE
1730 help
1731 NAPI is a new driver API designed to reduce CPU and interrupt load
1732 when the driver is receiving lots of packets from the card.
1733
1734 If your estimated Rx load is 10kpps or more, or if the card will be
1735 deployed on potentially unfriendly networks (e.g. in a firewall),
1736 then say Y here.
1737
1738 See <file:Documentation/networking/NAPI_HOWTO.txt> for more
1739 information.
1740
1727config LAN_SAA9730 1741config LAN_SAA9730
1728 bool "Philips SAA9730 Ethernet support (EXPERIMENTAL)" 1742 bool "Philips SAA9730 Ethernet support (EXPERIMENTAL)"
1729 depends on NET_PCI && EXPERIMENTAL && MIPS 1743 depends on NET_PCI && EXPERIMENTAL && MIPS
@@ -2219,6 +2233,33 @@ config GFAR_NAPI
2219 bool "NAPI Support" 2233 bool "NAPI Support"
2220 depends on GIANFAR 2234 depends on GIANFAR
2221 2235
2236config UCC_GETH
2237 tristate "Freescale QE UCC GETH"
2238 depends on QUICC_ENGINE && UCC_FAST
2239 help
2240 This driver supports the Gigabit Ethernet mode of QE UCC.
2241 QE can be found on MPC836x CPUs.
2242
2243config UGETH_NAPI
2244 bool "NAPI Support"
2245 depends on UCC_GETH
2246
2247config UGETH_MAGIC_PACKET
2248 bool "Magic Packet detection support"
2249 depends on UCC_GETH
2250
2251config UGETH_FILTERING
2252	bool "MAC address filtering support"
2253 depends on UCC_GETH
2254
2255config UGETH_TX_ON_DEMOND
2256	bool "Transmit on Demand support"
2257 depends on UCC_GETH
2258
2259config UGETH_HAS_GIGA
2260 bool
2261 depends on UCC_GETH && MPC836x
2262
2222config MV643XX_ETH 2263config MV643XX_ETH
2223 tristate "MV-643XX Ethernet support" 2264 tristate "MV-643XX Ethernet support"
2224 depends on MOMENCO_OCELOT_C || MOMENCO_JAGUAR_ATX || MV64360 || MOMENCO_OCELOT_3 || PPC_MULTIPLATFORM 2265 depends on MOMENCO_OCELOT_C || MOMENCO_JAGUAR_ATX || MV64360 || MOMENCO_OCELOT_3 || PPC_MULTIPLATFORM
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index c91e95126f7..8427bf9dec9 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -18,6 +18,9 @@ gianfar_driver-objs := gianfar.o \
18 gianfar_mii.o \ 18 gianfar_mii.o \
19 gianfar_sysfs.o 19 gianfar_sysfs.o
20 20
21obj-$(CONFIG_UCC_GETH) += ucc_geth_driver.o
22ucc_geth_driver-objs := ucc_geth.o ucc_geth_phy.o
23
21# 24#
22# link order important here 25# link order important here
23# 26#
diff --git a/drivers/net/ac3200.c b/drivers/net/ac3200.c
index 7952dc6d77e..0fbbcb75af6 100644
--- a/drivers/net/ac3200.c
+++ b/drivers/net/ac3200.c
@@ -370,8 +370,7 @@ MODULE_PARM_DESC(mem, "Memory base address(es)");
370MODULE_DESCRIPTION("Ansel AC3200 EISA ethernet driver"); 370MODULE_DESCRIPTION("Ansel AC3200 EISA ethernet driver");
371MODULE_LICENSE("GPL"); 371MODULE_LICENSE("GPL");
372 372
373int 373int __init init_module(void)
374init_module(void)
375{ 374{
376 struct net_device *dev; 375 struct net_device *dev;
377 int this_dev, found = 0; 376 int this_dev, found = 0;
diff --git a/drivers/net/appletalk/cops.c b/drivers/net/appletalk/cops.c
index 1d01ac0000e..ae7f828344d 100644
--- a/drivers/net/appletalk/cops.c
+++ b/drivers/net/appletalk/cops.c
@@ -1030,7 +1030,7 @@ module_param(io, int, 0);
1030module_param(irq, int, 0); 1030module_param(irq, int, 0);
1031module_param(board_type, int, 0); 1031module_param(board_type, int, 0);
1032 1032
1033int init_module(void) 1033int __init init_module(void)
1034{ 1034{
1035 if (io == 0) 1035 if (io == 0)
1036 printk(KERN_WARNING "%s: You shouldn't autoprobe with insmod\n", 1036 printk(KERN_WARNING "%s: You shouldn't autoprobe with insmod\n",
diff --git a/drivers/net/at1700.c b/drivers/net/at1700.c
index 5d7929c79bc..4ca061c2d5b 100644
--- a/drivers/net/at1700.c
+++ b/drivers/net/at1700.c
@@ -901,7 +901,7 @@ MODULE_PARM_DESC(io, "AT1700/FMV18X I/O base address");
901MODULE_PARM_DESC(irq, "AT1700/FMV18X IRQ number"); 901MODULE_PARM_DESC(irq, "AT1700/FMV18X IRQ number");
902MODULE_PARM_DESC(net_debug, "AT1700/FMV18X debug level (0-6)"); 902MODULE_PARM_DESC(net_debug, "AT1700/FMV18X debug level (0-6)");
903 903
904int init_module(void) 904int __init init_module(void)
905{ 905{
906 if (io == 0) 906 if (io == 0)
907 printk("at1700: You should not use auto-probing with insmod!\n"); 907 printk("at1700: You should not use auto-probing with insmod!\n");
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index db73de0d251..652eb05a6c2 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -56,8 +56,8 @@
56 56
57#define DRV_MODULE_NAME "bnx2" 57#define DRV_MODULE_NAME "bnx2"
58#define PFX DRV_MODULE_NAME ": " 58#define PFX DRV_MODULE_NAME ": "
59#define DRV_MODULE_VERSION "1.4.43" 59#define DRV_MODULE_VERSION "1.4.44"
60#define DRV_MODULE_RELDATE "June 28, 2006" 60#define DRV_MODULE_RELDATE "August 10, 2006"
61 61
62#define RUN_AT(x) (jiffies + (x)) 62#define RUN_AT(x) (jiffies + (x))
63 63
@@ -209,8 +209,10 @@ MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
209 209
210static inline u32 bnx2_tx_avail(struct bnx2 *bp) 210static inline u32 bnx2_tx_avail(struct bnx2 *bp)
211{ 211{
212 u32 diff = TX_RING_IDX(bp->tx_prod) - TX_RING_IDX(bp->tx_cons); 212 u32 diff;
213 213
214 smp_mb();
215 diff = TX_RING_IDX(bp->tx_prod) - TX_RING_IDX(bp->tx_cons);
214 if (diff > MAX_TX_DESC_CNT) 216 if (diff > MAX_TX_DESC_CNT)
215 diff = (diff & MAX_TX_DESC_CNT) - 1; 217 diff = (diff & MAX_TX_DESC_CNT) - 1;
216 return (bp->tx_ring_size - diff); 218 return (bp->tx_ring_size - diff);
@@ -1569,7 +1571,7 @@ bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
1569 struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)]; 1571 struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
1570 unsigned long align; 1572 unsigned long align;
1571 1573
1572 skb = dev_alloc_skb(bp->rx_buf_size); 1574 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1573 if (skb == NULL) { 1575 if (skb == NULL) {
1574 return -ENOMEM; 1576 return -ENOMEM;
1575 } 1577 }
@@ -1578,7 +1580,6 @@ bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
1578 skb_reserve(skb, 8 - align); 1580 skb_reserve(skb, 8 - align);
1579 } 1581 }
1580 1582
1581 skb->dev = bp->dev;
1582 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size, 1583 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
1583 PCI_DMA_FROMDEVICE); 1584 PCI_DMA_FROMDEVICE);
1584 1585
@@ -1686,15 +1687,20 @@ bnx2_tx_int(struct bnx2 *bp)
1686 } 1687 }
1687 1688
1688 bp->tx_cons = sw_cons; 1689 bp->tx_cons = sw_cons;
1690 /* Need to make the tx_cons update visible to bnx2_start_xmit()
1691 * before checking for netif_queue_stopped(). Without the
1692 * memory barrier, there is a small possibility that bnx2_start_xmit()
1693 * will miss it and cause the queue to be stopped forever.
1694 */
1695 smp_mb();
1689 1696
1690 if (unlikely(netif_queue_stopped(bp->dev))) { 1697 if (unlikely(netif_queue_stopped(bp->dev)) &&
1691 spin_lock(&bp->tx_lock); 1698 (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) {
1699 netif_tx_lock(bp->dev);
1692 if ((netif_queue_stopped(bp->dev)) && 1700 if ((netif_queue_stopped(bp->dev)) &&
1693 (bnx2_tx_avail(bp) > MAX_SKB_FRAGS)) { 1701 (bnx2_tx_avail(bp) > bp->tx_wake_thresh))
1694
1695 netif_wake_queue(bp->dev); 1702 netif_wake_queue(bp->dev);
1696 } 1703 netif_tx_unlock(bp->dev);
1697 spin_unlock(&bp->tx_lock);
1698 } 1704 }
1699} 1705}
1700 1706
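The comment added above describes one half of a barrier pair: bnx2_tx_int() publishes the new tx_cons and issues smp_mb() before testing netif_queue_stopped(), while bnx2_start_xmit() stops the queue and then re-reads the available space through bnx2_tx_avail(), which now begins with its own smp_mb(). With both barriers in place, neither side can miss the other's update. A user-space sketch of the same store/barrier/load pattern with C11 atomics (illustrative only; the thresholds are made up, not driver values):

#include <stdatomic.h>
#include <stdio.h>

static atomic_uint free_slots;     /* stands in for bnx2_tx_avail()        */
static atomic_int  queue_stopped;  /* stands in for netif_queue_stopped()  */

/* Completion side (mirrors bnx2_tx_int): publish progress, then re-check. */
static void completion(unsigned int freed)
{
	atomic_fetch_add_explicit(&free_slots, freed, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);   /* the smp_mb() added in bnx2_tx_int() */
	if (atomic_load(&queue_stopped) && atomic_load(&free_slots) > 8)
		atomic_store(&queue_stopped, 0);         /* netif_wake_queue() */
}

/* Transmit side (mirrors bnx2_start_xmit): stop first, then re-check space. */
static void xmit_when_full(void)
{
	atomic_store(&queue_stopped, 1);             /* netif_stop_queue() */
	atomic_thread_fence(memory_order_seq_cst);   /* the smp_mb() added in bnx2_tx_avail() */
	if (atomic_load(&free_slots) > 8)
		atomic_store(&queue_stopped, 0);         /* ring drained meanwhile: wake again */
}

int main(void)
{
	completion(16);
	xmit_when_full();
	printf("queue_stopped = %d\n", atomic_load(&queue_stopped));
	return 0;
}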
@@ -1786,7 +1792,7 @@ bnx2_rx_int(struct bnx2 *bp, int budget)
1786 if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) { 1792 if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
1787 struct sk_buff *new_skb; 1793 struct sk_buff *new_skb;
1788 1794
1789 new_skb = dev_alloc_skb(len + 2); 1795 new_skb = netdev_alloc_skb(bp->dev, len + 2);
1790 if (new_skb == NULL) 1796 if (new_skb == NULL)
1791 goto reuse_rx; 1797 goto reuse_rx;
1792 1798
@@ -1797,7 +1803,6 @@ bnx2_rx_int(struct bnx2 *bp, int budget)
1797 1803
1798 skb_reserve(new_skb, 2); 1804 skb_reserve(new_skb, 2);
1799 skb_put(new_skb, len); 1805 skb_put(new_skb, len);
1800 new_skb->dev = bp->dev;
1801 1806
1802 bnx2_reuse_rx_skb(bp, skb, 1807 bnx2_reuse_rx_skb(bp, skb,
1803 sw_ring_cons, sw_ring_prod); 1808 sw_ring_cons, sw_ring_prod);
@@ -3503,6 +3508,8 @@ bnx2_init_tx_ring(struct bnx2 *bp)
3503 struct tx_bd *txbd; 3508 struct tx_bd *txbd;
3504 u32 val; 3509 u32 val;
3505 3510
3511 bp->tx_wake_thresh = bp->tx_ring_size / 2;
3512
3506 txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT]; 3513 txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];
3507 3514
3508 txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32; 3515 txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
@@ -3952,7 +3959,7 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
3952 return -EINVAL; 3959 return -EINVAL;
3953 3960
3954 pkt_size = 1514; 3961 pkt_size = 1514;
3955 skb = dev_alloc_skb(pkt_size); 3962 skb = netdev_alloc_skb(bp->dev, pkt_size);
3956 if (!skb) 3963 if (!skb)
3957 return -ENOMEM; 3964 return -ENOMEM;
3958 packet = skb_put(skb, pkt_size); 3965 packet = skb_put(skb, pkt_size);
@@ -4390,10 +4397,8 @@ bnx2_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
4390#endif 4397#endif
4391 4398
4392/* Called with netif_tx_lock. 4399/* Called with netif_tx_lock.
4393 * hard_start_xmit is pseudo-lockless - a lock is only required when 4400 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
4394 * the tx queue is full. This way, we get the benefit of lockless 4401 * netif_wake_queue().
4395 * operations most of the time without the complexities to handle
4396 * netif_stop_queue/wake_queue race conditions.
4397 */ 4402 */
4398static int 4403static int
4399bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev) 4404bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
@@ -4512,12 +4517,9 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
4512 dev->trans_start = jiffies; 4517 dev->trans_start = jiffies;
4513 4518
4514 if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) { 4519 if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
4515 spin_lock(&bp->tx_lock);
4516 netif_stop_queue(dev); 4520 netif_stop_queue(dev);
4517 4521 if (bnx2_tx_avail(bp) > bp->tx_wake_thresh)
4518 if (bnx2_tx_avail(bp) > MAX_SKB_FRAGS)
4519 netif_wake_queue(dev); 4522 netif_wake_queue(dev);
4520 spin_unlock(&bp->tx_lock);
4521 } 4523 }
4522 4524
4523 return NETDEV_TX_OK; 4525 return NETDEV_TX_OK;
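A related detail of the lockless rework shown above: the queue is restarted only once bnx2_tx_avail() exceeds bp->tx_wake_thresh, which bnx2_init_tx_ring() sets to half the TX ring, rather than as soon as MAX_SKB_FRAGS descriptors free up. The wider gap between the stop and wake points keeps the queue from flapping under load. A tiny numeric sketch (the ring size and MAX_SKB_FRAGS values are assumed for illustration):

#include <stdio.h>

int main(void)
{
	int tx_ring_size   = 256;                 /* assumed ring size */
	int max_skb_frags  = 18;                  /* assumed MAX_SKB_FRAGS */
	int tx_wake_thresh = tx_ring_size / 2;    /* as in bnx2_init_tx_ring() */

	printf("old wake point: > %d free descriptors\n", max_skb_frags);
	printf("new wake point: > %d free descriptors\n", tx_wake_thresh);
	return 0;
}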
@@ -5628,7 +5630,6 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
5628 bp->pdev = pdev; 5630 bp->pdev = pdev;
5629 5631
5630 spin_lock_init(&bp->phy_lock); 5632 spin_lock_init(&bp->phy_lock);
5631 spin_lock_init(&bp->tx_lock);
5632 INIT_WORK(&bp->reset_task, bnx2_reset_task, bp); 5633 INIT_WORK(&bp->reset_task, bnx2_reset_task, bp);
5633 5634
5634 dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0); 5635 dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
@@ -5751,7 +5752,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
5751 bp->mac_addr[5] = (u8) reg; 5752 bp->mac_addr[5] = (u8) reg;
5752 5753
5753 bp->tx_ring_size = MAX_TX_DESC_CNT; 5754 bp->tx_ring_size = MAX_TX_DESC_CNT;
5754 bnx2_set_rx_ring_size(bp, 100); 5755 bnx2_set_rx_ring_size(bp, 255);
5755 5756
5756 bp->rx_csum = 1; 5757 bp->rx_csum = 1;
5757 5758
diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h
index 658c5ee95c7..fe804763c60 100644
--- a/drivers/net/bnx2.h
+++ b/drivers/net/bnx2.h
@@ -3890,10 +3890,6 @@ struct bnx2 {
3890 u32 tx_prod_bseq __attribute__((aligned(L1_CACHE_BYTES))); 3890 u32 tx_prod_bseq __attribute__((aligned(L1_CACHE_BYTES)));
3891 u16 tx_prod; 3891 u16 tx_prod;
3892 3892
3893 struct tx_bd *tx_desc_ring;
3894 struct sw_bd *tx_buf_ring;
3895 int tx_ring_size;
3896
3897 u16 tx_cons __attribute__((aligned(L1_CACHE_BYTES))); 3893 u16 tx_cons __attribute__((aligned(L1_CACHE_BYTES)));
3898 u16 hw_tx_cons; 3894 u16 hw_tx_cons;
3899 3895
@@ -3916,9 +3912,11 @@ struct bnx2 {
3916 struct sw_bd *rx_buf_ring; 3912 struct sw_bd *rx_buf_ring;
3917 struct rx_bd *rx_desc_ring[MAX_RX_RINGS]; 3913 struct rx_bd *rx_desc_ring[MAX_RX_RINGS];
3918 3914
3919 /* Only used to synchronize netif_stop_queue/wake_queue when tx */ 3915 /* TX constants */
3920 /* ring is full */ 3916 struct tx_bd *tx_desc_ring;
3921 spinlock_t tx_lock; 3917 struct sw_bd *tx_buf_ring;
3918 int tx_ring_size;
3919 u32 tx_wake_thresh;
3922 3920
3923 /* End of fields used in the performance code paths. */ 3921 /* End of fields used in the performance code paths. */
3924 3922
diff --git a/drivers/net/cs89x0.c b/drivers/net/cs89x0.c
index 47eecce35fa..2dcca79b1f6 100644
--- a/drivers/net/cs89x0.c
+++ b/drivers/net/cs89x0.c
@@ -1905,8 +1905,7 @@ MODULE_LICENSE("GPL");
1905 1905
1906*/ 1906*/
1907 1907
1908int 1908int __init init_module(void)
1909init_module(void)
1910{ 1909{
1911 struct net_device *dev = alloc_etherdev(sizeof(struct net_local)); 1910 struct net_device *dev = alloc_etherdev(sizeof(struct net_local));
1912 struct net_local *lp; 1911 struct net_local *lp;
diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c
index 1b758b70713..3d76fa144c4 100644
--- a/drivers/net/dm9000.c
+++ b/drivers/net/dm9000.c
@@ -339,6 +339,17 @@ static void dm9000_timeout(struct net_device *dev)
339 spin_unlock_irqrestore(&db->lock,flags); 339 spin_unlock_irqrestore(&db->lock,flags);
340} 340}
341 341
342#ifdef CONFIG_NET_POLL_CONTROLLER
343/*
344 *Used by netconsole
345 */
346static void dm9000_poll_controller(struct net_device *dev)
347{
348 disable_irq(dev->irq);
349 dm9000_interrupt(dev->irq,dev,NULL);
350 enable_irq(dev->irq);
351}
352#endif
342 353
343/* dm9000_release_board 354/* dm9000_release_board
344 * 355 *
@@ -538,6 +549,9 @@ dm9000_probe(struct platform_device *pdev)
538 ndev->stop = &dm9000_stop; 549 ndev->stop = &dm9000_stop;
539 ndev->get_stats = &dm9000_get_stats; 550 ndev->get_stats = &dm9000_get_stats;
540 ndev->set_multicast_list = &dm9000_hash_table; 551 ndev->set_multicast_list = &dm9000_hash_table;
552#ifdef CONFIG_NET_POLL_CONTROLLER
553 ndev->poll_controller = &dm9000_poll_controller;
554#endif
541 555
542#ifdef DM9000_PROGRAM_EEPROM 556#ifdef DM9000_PROGRAM_EEPROM
543 program_eeprom(db); 557 program_eeprom(db);
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index 91ef5f2fd76..ce850f1078b 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -173,8 +173,11 @@ MODULE_LICENSE("GPL");
173MODULE_VERSION(DRV_VERSION); 173MODULE_VERSION(DRV_VERSION);
174 174
175static int debug = 3; 175static int debug = 3;
176static int eeprom_bad_csum_allow = 0;
176module_param(debug, int, 0); 177module_param(debug, int, 0);
178module_param(eeprom_bad_csum_allow, int, 0);
177MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); 179MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
180MODULE_PARM_DESC(eeprom_bad_csum_allow, "Allow bad eeprom checksums");
178#define DPRINTK(nlevel, klevel, fmt, args...) \ 181#define DPRINTK(nlevel, klevel, fmt, args...) \
179 (void)((NETIF_MSG_##nlevel & nic->msg_enable) && \ 182 (void)((NETIF_MSG_##nlevel & nic->msg_enable) && \
180 printk(KERN_##klevel PFX "%s: %s: " fmt, nic->netdev->name, \ 183 printk(KERN_##klevel PFX "%s: %s: " fmt, nic->netdev->name, \
@@ -756,7 +759,8 @@ static int e100_eeprom_load(struct nic *nic)
756 checksum = le16_to_cpu(0xBABA - checksum); 759 checksum = le16_to_cpu(0xBABA - checksum);
757 if(checksum != nic->eeprom[nic->eeprom_wc - 1]) { 760 if(checksum != nic->eeprom[nic->eeprom_wc - 1]) {
758 DPRINTK(PROBE, ERR, "EEPROM corrupted\n"); 761 DPRINTK(PROBE, ERR, "EEPROM corrupted\n");
759 return -EAGAIN; 762 if (!eeprom_bad_csum_allow)
763 return -EAGAIN;
760 } 764 }
761 765
762 return 0; 766 return 0;
diff --git a/drivers/net/e1000/e1000_hw.c b/drivers/net/e1000/e1000_hw.c
index 583518ae49c..b3b919116e0 100644
--- a/drivers/net/e1000/e1000_hw.c
+++ b/drivers/net/e1000/e1000_hw.c
@@ -105,6 +105,33 @@ static int32_t e1000_configure_kmrn_for_10_100(struct e1000_hw *hw,
105 uint16_t duplex); 105 uint16_t duplex);
106static int32_t e1000_configure_kmrn_for_1000(struct e1000_hw *hw); 106static int32_t e1000_configure_kmrn_for_1000(struct e1000_hw *hw);
107 107
108static int32_t e1000_erase_ich8_4k_segment(struct e1000_hw *hw,
109 uint32_t segment);
110static int32_t e1000_get_software_flag(struct e1000_hw *hw);
111static int32_t e1000_get_software_semaphore(struct e1000_hw *hw);
112static int32_t e1000_init_lcd_from_nvm(struct e1000_hw *hw);
113static int32_t e1000_kumeran_lock_loss_workaround(struct e1000_hw *hw);
114static int32_t e1000_read_eeprom_ich8(struct e1000_hw *hw, uint16_t offset,
115 uint16_t words, uint16_t *data);
116static int32_t e1000_read_ich8_byte(struct e1000_hw *hw, uint32_t index,
117 uint8_t* data);
118static int32_t e1000_read_ich8_word(struct e1000_hw *hw, uint32_t index,
119 uint16_t *data);
120static int32_t e1000_read_kmrn_reg(struct e1000_hw *hw, uint32_t reg_addr,
121 uint16_t *data);
122static void e1000_release_software_flag(struct e1000_hw *hw);
123static void e1000_release_software_semaphore(struct e1000_hw *hw);
124static int32_t e1000_set_pci_ex_no_snoop(struct e1000_hw *hw,
125 uint32_t no_snoop);
126static int32_t e1000_verify_write_ich8_byte(struct e1000_hw *hw,
127 uint32_t index, uint8_t byte);
128static int32_t e1000_write_eeprom_ich8(struct e1000_hw *hw, uint16_t offset,
129 uint16_t words, uint16_t *data);
130static int32_t e1000_write_ich8_byte(struct e1000_hw *hw, uint32_t index,
131 uint8_t data);
132static int32_t e1000_write_kmrn_reg(struct e1000_hw *hw, uint32_t reg_addr,
133 uint16_t data);
134
108/* IGP cable length table */ 135/* IGP cable length table */
109static const 136static const
110uint16_t e1000_igp_cable_length_table[IGP01E1000_AGC_LENGTH_TABLE_SIZE] = 137uint16_t e1000_igp_cable_length_table[IGP01E1000_AGC_LENGTH_TABLE_SIZE] =
@@ -3233,7 +3260,7 @@ e1000_shift_in_mdi_bits(struct e1000_hw *hw)
3233 return data; 3260 return data;
3234} 3261}
3235 3262
3236int32_t 3263static int32_t
3237e1000_swfw_sync_acquire(struct e1000_hw *hw, uint16_t mask) 3264e1000_swfw_sync_acquire(struct e1000_hw *hw, uint16_t mask)
3238{ 3265{
3239 uint32_t swfw_sync = 0; 3266 uint32_t swfw_sync = 0;
@@ -3277,7 +3304,7 @@ e1000_swfw_sync_acquire(struct e1000_hw *hw, uint16_t mask)
3277 return E1000_SUCCESS; 3304 return E1000_SUCCESS;
3278} 3305}
3279 3306
3280void 3307static void
3281e1000_swfw_sync_release(struct e1000_hw *hw, uint16_t mask) 3308e1000_swfw_sync_release(struct e1000_hw *hw, uint16_t mask)
3282{ 3309{
3283 uint32_t swfw_sync; 3310 uint32_t swfw_sync;
@@ -3575,7 +3602,7 @@ e1000_write_phy_reg_ex(struct e1000_hw *hw,
3575 return E1000_SUCCESS; 3602 return E1000_SUCCESS;
3576} 3603}
3577 3604
3578int32_t 3605static int32_t
3579e1000_read_kmrn_reg(struct e1000_hw *hw, 3606e1000_read_kmrn_reg(struct e1000_hw *hw,
3580 uint32_t reg_addr, 3607 uint32_t reg_addr,
3581 uint16_t *data) 3608 uint16_t *data)
@@ -3608,7 +3635,7 @@ e1000_read_kmrn_reg(struct e1000_hw *hw,
3608 return E1000_SUCCESS; 3635 return E1000_SUCCESS;
3609} 3636}
3610 3637
3611int32_t 3638static int32_t
3612e1000_write_kmrn_reg(struct e1000_hw *hw, 3639e1000_write_kmrn_reg(struct e1000_hw *hw,
3613 uint32_t reg_addr, 3640 uint32_t reg_addr,
3614 uint16_t data) 3641 uint16_t data)
@@ -3839,7 +3866,7 @@ e1000_phy_powerdown_workaround(struct e1000_hw *hw)
3839* 3866*
3840* hw - struct containing variables accessed by shared code 3867* hw - struct containing variables accessed by shared code
3841******************************************************************************/ 3868******************************************************************************/
3842int32_t 3869static int32_t
3843e1000_kumeran_lock_loss_workaround(struct e1000_hw *hw) 3870e1000_kumeran_lock_loss_workaround(struct e1000_hw *hw)
3844{ 3871{
3845 int32_t ret_val; 3872 int32_t ret_val;
@@ -4086,7 +4113,7 @@ e1000_phy_igp_get_info(struct e1000_hw *hw,
4086* hw - Struct containing variables accessed by shared code 4113* hw - Struct containing variables accessed by shared code
4087* phy_info - PHY information structure 4114* phy_info - PHY information structure
4088******************************************************************************/ 4115******************************************************************************/
4089int32_t 4116static int32_t
4090e1000_phy_ife_get_info(struct e1000_hw *hw, 4117e1000_phy_ife_get_info(struct e1000_hw *hw,
4091 struct e1000_phy_info *phy_info) 4118 struct e1000_phy_info *phy_info)
4092{ 4119{
@@ -5643,6 +5670,7 @@ e1000_init_rx_addrs(struct e1000_hw *hw)
5643 * for the first 15 multicast addresses, and hashes the rest into the 5670 * for the first 15 multicast addresses, and hashes the rest into the
5644 * multicast table. 5671 * multicast table.
5645 *****************************************************************************/ 5672 *****************************************************************************/
5673#if 0
5646void 5674void
5647e1000_mc_addr_list_update(struct e1000_hw *hw, 5675e1000_mc_addr_list_update(struct e1000_hw *hw,
5648 uint8_t *mc_addr_list, 5676 uint8_t *mc_addr_list,
@@ -5719,6 +5747,7 @@ e1000_mc_addr_list_update(struct e1000_hw *hw,
5719 } 5747 }
5720 DEBUGOUT("MC Update Complete\n"); 5748 DEBUGOUT("MC Update Complete\n");
5721} 5749}
5750#endif /* 0 */
5722 5751
5723/****************************************************************************** 5752/******************************************************************************
5724 * Hashes an address to determine its location in the multicast table 5753 * Hashes an address to determine its location in the multicast table
@@ -6587,6 +6616,7 @@ e1000_get_bus_info(struct e1000_hw *hw)
6587 * hw - Struct containing variables accessed by shared code 6616 * hw - Struct containing variables accessed by shared code
6588 * offset - offset to read from 6617 * offset - offset to read from
6589 *****************************************************************************/ 6618 *****************************************************************************/
6619#if 0
6590uint32_t 6620uint32_t
6591e1000_read_reg_io(struct e1000_hw *hw, 6621e1000_read_reg_io(struct e1000_hw *hw,
6592 uint32_t offset) 6622 uint32_t offset)
@@ -6597,6 +6627,7 @@ e1000_read_reg_io(struct e1000_hw *hw,
6597 e1000_io_write(hw, io_addr, offset); 6627 e1000_io_write(hw, io_addr, offset);
6598 return e1000_io_read(hw, io_data); 6628 return e1000_io_read(hw, io_data);
6599} 6629}
6630#endif /* 0 */
6600 6631
6601/****************************************************************************** 6632/******************************************************************************
6602 * Writes a value to one of the devices registers using port I/O (as opposed to 6633 * Writes a value to one of the devices registers using port I/O (as opposed to
@@ -7909,6 +7940,7 @@ e1000_set_pci_express_master_disable(struct e1000_hw *hw)
7909 * returns: - none. 7940 * returns: - none.
7910 * 7941 *
7911 ***************************************************************************/ 7942 ***************************************************************************/
7943#if 0
7912void 7944void
7913e1000_enable_pciex_master(struct e1000_hw *hw) 7945e1000_enable_pciex_master(struct e1000_hw *hw)
7914{ 7946{
@@ -7923,6 +7955,7 @@ e1000_enable_pciex_master(struct e1000_hw *hw)
7923 ctrl &= ~E1000_CTRL_GIO_MASTER_DISABLE; 7955 ctrl &= ~E1000_CTRL_GIO_MASTER_DISABLE;
7924 E1000_WRITE_REG(hw, CTRL, ctrl); 7956 E1000_WRITE_REG(hw, CTRL, ctrl);
7925} 7957}
7958#endif /* 0 */
7926 7959
7927/******************************************************************************* 7960/*******************************************************************************
7928 * 7961 *
@@ -8148,7 +8181,7 @@ e1000_put_hw_eeprom_semaphore(struct e1000_hw *hw)
8148 * E1000_SUCCESS at any other case. 8181 * E1000_SUCCESS at any other case.
8149 * 8182 *
8150 ***************************************************************************/ 8183 ***************************************************************************/
8151int32_t 8184static int32_t
8152e1000_get_software_semaphore(struct e1000_hw *hw) 8185e1000_get_software_semaphore(struct e1000_hw *hw)
8153{ 8186{
8154 int32_t timeout = hw->eeprom.word_size + 1; 8187 int32_t timeout = hw->eeprom.word_size + 1;
@@ -8183,7 +8216,7 @@ e1000_get_software_semaphore(struct e1000_hw *hw)
8183 * hw: Struct containing variables accessed by shared code 8216 * hw: Struct containing variables accessed by shared code
8184 * 8217 *
8185 ***************************************************************************/ 8218 ***************************************************************************/
8186void 8219static void
8187e1000_release_software_semaphore(struct e1000_hw *hw) 8220e1000_release_software_semaphore(struct e1000_hw *hw)
8188{ 8221{
8189 uint32_t swsm; 8222 uint32_t swsm;
@@ -8265,7 +8298,7 @@ e1000_arc_subsystem_valid(struct e1000_hw *hw)
8265 * returns: E1000_SUCCESS 8298 * returns: E1000_SUCCESS
8266 * 8299 *
8267 *****************************************************************************/ 8300 *****************************************************************************/
8268int32_t 8301static int32_t
8269e1000_set_pci_ex_no_snoop(struct e1000_hw *hw, uint32_t no_snoop) 8302e1000_set_pci_ex_no_snoop(struct e1000_hw *hw, uint32_t no_snoop)
8270{ 8303{
8271 uint32_t gcr_reg = 0; 8304 uint32_t gcr_reg = 0;
@@ -8306,7 +8339,7 @@ e1000_set_pci_ex_no_snoop(struct e1000_hw *hw, uint32_t no_snoop)
8306 * hw: Struct containing variables accessed by shared code 8339 * hw: Struct containing variables accessed by shared code
8307 * 8340 *
8308 ***************************************************************************/ 8341 ***************************************************************************/
8309int32_t 8342static int32_t
8310e1000_get_software_flag(struct e1000_hw *hw) 8343e1000_get_software_flag(struct e1000_hw *hw)
8311{ 8344{
8312 int32_t timeout = PHY_CFG_TIMEOUT; 8345 int32_t timeout = PHY_CFG_TIMEOUT;
@@ -8345,7 +8378,7 @@ e1000_get_software_flag(struct e1000_hw *hw)
8345 * hw: Struct containing variables accessed by shared code 8378 * hw: Struct containing variables accessed by shared code
8346 * 8379 *
8347 ***************************************************************************/ 8380 ***************************************************************************/
8348void 8381static void
8349e1000_release_software_flag(struct e1000_hw *hw) 8382e1000_release_software_flag(struct e1000_hw *hw)
8350{ 8383{
8351 uint32_t extcnf_ctrl; 8384 uint32_t extcnf_ctrl;
@@ -8369,6 +8402,7 @@ e1000_release_software_flag(struct e1000_hw *hw)
8369 * hw: Struct containing variables accessed by shared code 8402 * hw: Struct containing variables accessed by shared code
8370 * 8403 *
8371 ***************************************************************************/ 8404 ***************************************************************************/
8405#if 0
8372int32_t 8406int32_t
8373e1000_ife_disable_dynamic_power_down(struct e1000_hw *hw) 8407e1000_ife_disable_dynamic_power_down(struct e1000_hw *hw)
8374{ 8408{
@@ -8388,6 +8422,7 @@ e1000_ife_disable_dynamic_power_down(struct e1000_hw *hw)
8388 8422
8389 return ret_val; 8423 return ret_val;
8390} 8424}
8425#endif /* 0 */
8391 8426
8392/*************************************************************************** 8427/***************************************************************************
8393 * 8428 *
@@ -8397,6 +8432,7 @@ e1000_ife_disable_dynamic_power_down(struct e1000_hw *hw)
8397 * hw: Struct containing variables accessed by shared code 8432 * hw: Struct containing variables accessed by shared code
8398 * 8433 *
8399 ***************************************************************************/ 8434 ***************************************************************************/
8435#if 0
8400int32_t 8436int32_t
8401e1000_ife_enable_dynamic_power_down(struct e1000_hw *hw) 8437e1000_ife_enable_dynamic_power_down(struct e1000_hw *hw)
8402{ 8438{
@@ -8416,6 +8452,7 @@ e1000_ife_enable_dynamic_power_down(struct e1000_hw *hw)
8416 8452
8417 return ret_val; 8453 return ret_val;
8418} 8454}
8455#endif /* 0 */
8419 8456
8420/****************************************************************************** 8457/******************************************************************************
8421 * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access 8458 * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
@@ -8426,7 +8463,7 @@ e1000_ife_enable_dynamic_power_down(struct e1000_hw *hw)
8426 * data - word read from the EEPROM 8463 * data - word read from the EEPROM
8427 * words - number of words to read 8464 * words - number of words to read
8428 *****************************************************************************/ 8465 *****************************************************************************/
8429int32_t 8466static int32_t
8430e1000_read_eeprom_ich8(struct e1000_hw *hw, uint16_t offset, uint16_t words, 8467e1000_read_eeprom_ich8(struct e1000_hw *hw, uint16_t offset, uint16_t words,
8431 uint16_t *data) 8468 uint16_t *data)
8432{ 8469{
@@ -8482,7 +8519,7 @@ e1000_read_eeprom_ich8(struct e1000_hw *hw, uint16_t offset, uint16_t words,
8482 * words - number of words to write 8519 * words - number of words to write
8483 * data - words to write to the EEPROM 8520 * data - words to write to the EEPROM
8484 *****************************************************************************/ 8521 *****************************************************************************/
8485int32_t 8522static int32_t
8486e1000_write_eeprom_ich8(struct e1000_hw *hw, uint16_t offset, uint16_t words, 8523e1000_write_eeprom_ich8(struct e1000_hw *hw, uint16_t offset, uint16_t words,
8487 uint16_t *data) 8524 uint16_t *data)
8488{ 8525{
@@ -8529,7 +8566,7 @@ e1000_write_eeprom_ich8(struct e1000_hw *hw, uint16_t offset, uint16_t words,
8529 * 8566 *
8530 * hw - The pointer to the hw structure 8567 * hw - The pointer to the hw structure
8531 ****************************************************************************/ 8568 ****************************************************************************/
8532int32_t 8569static int32_t
8533e1000_ich8_cycle_init(struct e1000_hw *hw) 8570e1000_ich8_cycle_init(struct e1000_hw *hw)
8534{ 8571{
8535 union ich8_hws_flash_status hsfsts; 8572 union ich8_hws_flash_status hsfsts;
@@ -8596,7 +8633,7 @@ e1000_ich8_cycle_init(struct e1000_hw *hw)
8596 * 8633 *
8597 * hw - The pointer to the hw structure 8634 * hw - The pointer to the hw structure
8598 ****************************************************************************/ 8635 ****************************************************************************/
8599int32_t 8636static int32_t
8600e1000_ich8_flash_cycle(struct e1000_hw *hw, uint32_t timeout) 8637e1000_ich8_flash_cycle(struct e1000_hw *hw, uint32_t timeout)
8601{ 8638{
8602 union ich8_hws_flash_ctrl hsflctl; 8639 union ich8_hws_flash_ctrl hsflctl;
@@ -8631,7 +8668,7 @@ e1000_ich8_flash_cycle(struct e1000_hw *hw, uint32_t timeout)
8631 * size - Size of data to read, 1=byte 2=word 8668 * size - Size of data to read, 1=byte 2=word
8632 * data - Pointer to the word to store the value read. 8669 * data - Pointer to the word to store the value read.
8633 *****************************************************************************/ 8670 *****************************************************************************/
8634int32_t 8671static int32_t
8635e1000_read_ich8_data(struct e1000_hw *hw, uint32_t index, 8672e1000_read_ich8_data(struct e1000_hw *hw, uint32_t index,
8636 uint32_t size, uint16_t* data) 8673 uint32_t size, uint16_t* data)
8637{ 8674{
@@ -8710,7 +8747,7 @@ e1000_read_ich8_data(struct e1000_hw *hw, uint32_t index,
8710 * size - Size of data to read, 1=byte 2=word 8747 * size - Size of data to read, 1=byte 2=word
8711 * data - The byte(s) to write to the NVM. 8748 * data - The byte(s) to write to the NVM.
8712 *****************************************************************************/ 8749 *****************************************************************************/
8713int32_t 8750static int32_t
8714e1000_write_ich8_data(struct e1000_hw *hw, uint32_t index, uint32_t size, 8751e1000_write_ich8_data(struct e1000_hw *hw, uint32_t index, uint32_t size,
8715 uint16_t data) 8752 uint16_t data)
8716{ 8753{
@@ -8785,7 +8822,7 @@ e1000_write_ich8_data(struct e1000_hw *hw, uint32_t index, uint32_t size,
8785 * index - The index of the byte to read. 8822 * index - The index of the byte to read.
8786 * data - Pointer to a byte to store the value read. 8823 * data - Pointer to a byte to store the value read.
8787 *****************************************************************************/ 8824 *****************************************************************************/
8788int32_t 8825static int32_t
8789e1000_read_ich8_byte(struct e1000_hw *hw, uint32_t index, uint8_t* data) 8826e1000_read_ich8_byte(struct e1000_hw *hw, uint32_t index, uint8_t* data)
8790{ 8827{
8791 int32_t status = E1000_SUCCESS; 8828 int32_t status = E1000_SUCCESS;
@@ -8808,7 +8845,7 @@ e1000_read_ich8_byte(struct e1000_hw *hw, uint32_t index, uint8_t* data)
8808 * index - The index of the byte to write. 8845 * index - The index of the byte to write.
8809 * byte - The byte to write to the NVM. 8846 * byte - The byte to write to the NVM.
8810 *****************************************************************************/ 8847 *****************************************************************************/
8811int32_t 8848static int32_t
8812e1000_verify_write_ich8_byte(struct e1000_hw *hw, uint32_t index, uint8_t byte) 8849e1000_verify_write_ich8_byte(struct e1000_hw *hw, uint32_t index, uint8_t byte)
8813{ 8850{
8814 int32_t error = E1000_SUCCESS; 8851 int32_t error = E1000_SUCCESS;
@@ -8839,7 +8876,7 @@ e1000_verify_write_ich8_byte(struct e1000_hw *hw, uint32_t index, uint8_t byte)
8839 * index - The index of the byte to read. 8876 * index - The index of the byte to read.
8840 * data - The byte to write to the NVM. 8877 * data - The byte to write to the NVM.
8841 *****************************************************************************/ 8878 *****************************************************************************/
8842int32_t 8879static int32_t
8843e1000_write_ich8_byte(struct e1000_hw *hw, uint32_t index, uint8_t data) 8880e1000_write_ich8_byte(struct e1000_hw *hw, uint32_t index, uint8_t data)
8844{ 8881{
8845 int32_t status = E1000_SUCCESS; 8882 int32_t status = E1000_SUCCESS;
@@ -8857,7 +8894,7 @@ e1000_write_ich8_byte(struct e1000_hw *hw, uint32_t index, uint8_t data)
8857 * index - The starting byte index of the word to read. 8894 * index - The starting byte index of the word to read.
8858 * data - Pointer to a word to store the value read. 8895 * data - Pointer to a word to store the value read.
8859 *****************************************************************************/ 8896 *****************************************************************************/
8860int32_t 8897static int32_t
8861e1000_read_ich8_word(struct e1000_hw *hw, uint32_t index, uint16_t *data) 8898e1000_read_ich8_word(struct e1000_hw *hw, uint32_t index, uint16_t *data)
8862{ 8899{
8863 int32_t status = E1000_SUCCESS; 8900 int32_t status = E1000_SUCCESS;
@@ -8872,6 +8909,7 @@ e1000_read_ich8_word(struct e1000_hw *hw, uint32_t index, uint16_t *data)
8872 * index - The starting byte index of the word to read. 8909 * index - The starting byte index of the word to read.
8873 * data - The word to write to the NVM. 8910 * data - The word to write to the NVM.
8874 *****************************************************************************/ 8911 *****************************************************************************/
8912#if 0
8875int32_t 8913int32_t
8876e1000_write_ich8_word(struct e1000_hw *hw, uint32_t index, uint16_t data) 8914e1000_write_ich8_word(struct e1000_hw *hw, uint32_t index, uint16_t data)
8877{ 8915{
@@ -8879,6 +8917,7 @@ e1000_write_ich8_word(struct e1000_hw *hw, uint32_t index, uint16_t data)
8879 status = e1000_write_ich8_data(hw, index, 2, data); 8917 status = e1000_write_ich8_data(hw, index, 2, data);
8880 return status; 8918 return status;
8881} 8919}
8920#endif /* 0 */
8882 8921
8883/****************************************************************************** 8922/******************************************************************************
8884 * Erases the bank specified. Each bank is a 4k block. Segments are 0 based. 8923 * Erases the bank specified. Each bank is a 4k block. Segments are 0 based.
@@ -8887,7 +8926,7 @@ e1000_write_ich8_word(struct e1000_hw *hw, uint32_t index, uint16_t data)
8887 * hw - pointer to e1000_hw structure 8926 * hw - pointer to e1000_hw structure
8888 * segment - 0 for first segment, 1 for second segment, etc. 8927 * segment - 0 for first segment, 1 for second segment, etc.
8889 *****************************************************************************/ 8928 *****************************************************************************/
8890int32_t 8929static int32_t
8891e1000_erase_ich8_4k_segment(struct e1000_hw *hw, uint32_t segment) 8930e1000_erase_ich8_4k_segment(struct e1000_hw *hw, uint32_t segment)
8892{ 8931{
8893 union ich8_hws_flash_status hsfsts; 8932 union ich8_hws_flash_status hsfsts;
@@ -8984,6 +9023,7 @@ e1000_erase_ich8_4k_segment(struct e1000_hw *hw, uint32_t segment)
8984 * hw: Struct containing variables accessed by shared code 9023 * hw: Struct containing variables accessed by shared code
8985 * 9024 *
8986 *****************************************************************************/ 9025 *****************************************************************************/
9026#if 0
8987int32_t 9027int32_t
8988e1000_duplex_reversal(struct e1000_hw *hw) 9028e1000_duplex_reversal(struct e1000_hw *hw)
8989{ 9029{
@@ -9012,8 +9052,9 @@ e1000_duplex_reversal(struct e1000_hw *hw)
9012 9052
9013 return ret_val; 9053 return ret_val;
9014} 9054}
9055#endif /* 0 */
9015 9056
9016int32_t 9057static int32_t
9017e1000_init_lcd_from_nvm_config_region(struct e1000_hw *hw, 9058e1000_init_lcd_from_nvm_config_region(struct e1000_hw *hw,
9018 uint32_t cnf_base_addr, uint32_t cnf_size) 9059 uint32_t cnf_base_addr, uint32_t cnf_size)
9019{ 9060{
@@ -9047,7 +9088,7 @@ e1000_init_lcd_from_nvm_config_region(struct e1000_hw *hw,
9047} 9088}
9048 9089
9049 9090
9050int32_t 9091static int32_t
9051e1000_init_lcd_from_nvm(struct e1000_hw *hw) 9092e1000_init_lcd_from_nvm(struct e1000_hw *hw)
9052{ 9093{
9053 uint32_t reg_data, cnf_base_addr, cnf_size, ret_val, loop; 9094 uint32_t reg_data, cnf_base_addr, cnf_size, ret_val, loop;
diff --git a/drivers/net/e1000/e1000_hw.h b/drivers/net/e1000/e1000_hw.h
index f9341e3276b..375b95518c3 100644
--- a/drivers/net/e1000/e1000_hw.h
+++ b/drivers/net/e1000/e1000_hw.h
@@ -323,13 +323,8 @@ int32_t e1000_write_phy_reg(struct e1000_hw *hw, uint32_t reg_addr, uint16_t dat
323int32_t e1000_phy_hw_reset(struct e1000_hw *hw); 323int32_t e1000_phy_hw_reset(struct e1000_hw *hw);
324int32_t e1000_phy_reset(struct e1000_hw *hw); 324int32_t e1000_phy_reset(struct e1000_hw *hw);
325void e1000_phy_powerdown_workaround(struct e1000_hw *hw); 325void e1000_phy_powerdown_workaround(struct e1000_hw *hw);
326int32_t e1000_kumeran_lock_loss_workaround(struct e1000_hw *hw);
327int32_t e1000_init_lcd_from_nvm_config_region(struct e1000_hw *hw, uint32_t cnf_base_addr, uint32_t cnf_size);
328int32_t e1000_init_lcd_from_nvm(struct e1000_hw *hw);
329int32_t e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info); 326int32_t e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info);
330int32_t e1000_validate_mdi_setting(struct e1000_hw *hw); 327int32_t e1000_validate_mdi_setting(struct e1000_hw *hw);
331int32_t e1000_read_kmrn_reg(struct e1000_hw *hw, uint32_t reg_addr, uint16_t *data);
332int32_t e1000_write_kmrn_reg(struct e1000_hw *hw, uint32_t reg_addr, uint16_t data);
333 328
334/* EEPROM Functions */ 329/* EEPROM Functions */
335int32_t e1000_init_eeprom_params(struct e1000_hw *hw); 330int32_t e1000_init_eeprom_params(struct e1000_hw *hw);
@@ -400,13 +395,8 @@ int32_t e1000_update_eeprom_checksum(struct e1000_hw *hw);
400int32_t e1000_write_eeprom(struct e1000_hw *hw, uint16_t reg, uint16_t words, uint16_t *data); 395int32_t e1000_write_eeprom(struct e1000_hw *hw, uint16_t reg, uint16_t words, uint16_t *data);
401int32_t e1000_read_part_num(struct e1000_hw *hw, uint32_t * part_num); 396int32_t e1000_read_part_num(struct e1000_hw *hw, uint32_t * part_num);
402int32_t e1000_read_mac_addr(struct e1000_hw * hw); 397int32_t e1000_read_mac_addr(struct e1000_hw * hw);
403int32_t e1000_swfw_sync_acquire(struct e1000_hw *hw, uint16_t mask);
404void e1000_swfw_sync_release(struct e1000_hw *hw, uint16_t mask);
405void e1000_release_software_flag(struct e1000_hw *hw);
406int32_t e1000_get_software_flag(struct e1000_hw *hw);
407 398
408/* Filters (multicast, vlan, receive) */ 399/* Filters (multicast, vlan, receive) */
409void e1000_mc_addr_list_update(struct e1000_hw *hw, uint8_t * mc_addr_list, uint32_t mc_addr_count, uint32_t pad, uint32_t rar_used_count);
410uint32_t e1000_hash_mc_addr(struct e1000_hw *hw, uint8_t * mc_addr); 400uint32_t e1000_hash_mc_addr(struct e1000_hw *hw, uint8_t * mc_addr);
411void e1000_mta_set(struct e1000_hw *hw, uint32_t hash_value); 401void e1000_mta_set(struct e1000_hw *hw, uint32_t hash_value);
412void e1000_rar_set(struct e1000_hw *hw, uint8_t * mc_addr, uint32_t rar_index); 402void e1000_rar_set(struct e1000_hw *hw, uint8_t * mc_addr, uint32_t rar_index);
@@ -431,31 +421,9 @@ void e1000_pci_clear_mwi(struct e1000_hw *hw);
431void e1000_read_pci_cfg(struct e1000_hw *hw, uint32_t reg, uint16_t * value); 421void e1000_read_pci_cfg(struct e1000_hw *hw, uint32_t reg, uint16_t * value);
432void e1000_write_pci_cfg(struct e1000_hw *hw, uint32_t reg, uint16_t * value); 422void e1000_write_pci_cfg(struct e1000_hw *hw, uint32_t reg, uint16_t * value);
433/* Port I/O is only supported on 82544 and newer */ 423/* Port I/O is only supported on 82544 and newer */
434uint32_t e1000_io_read(struct e1000_hw *hw, unsigned long port);
435uint32_t e1000_read_reg_io(struct e1000_hw *hw, uint32_t offset);
436void e1000_io_write(struct e1000_hw *hw, unsigned long port, uint32_t value); 424void e1000_io_write(struct e1000_hw *hw, unsigned long port, uint32_t value);
437void e1000_enable_pciex_master(struct e1000_hw *hw);
438int32_t e1000_disable_pciex_master(struct e1000_hw *hw); 425int32_t e1000_disable_pciex_master(struct e1000_hw *hw);
439int32_t e1000_get_software_semaphore(struct e1000_hw *hw);
440void e1000_release_software_semaphore(struct e1000_hw *hw);
441int32_t e1000_check_phy_reset_block(struct e1000_hw *hw); 426int32_t e1000_check_phy_reset_block(struct e1000_hw *hw);
442int32_t e1000_set_pci_ex_no_snoop(struct e1000_hw *hw, uint32_t no_snoop);
443
444int32_t e1000_read_ich8_byte(struct e1000_hw *hw, uint32_t index,
445 uint8_t *data);
446int32_t e1000_verify_write_ich8_byte(struct e1000_hw *hw, uint32_t index,
447 uint8_t byte);
448int32_t e1000_write_ich8_byte(struct e1000_hw *hw, uint32_t index,
449 uint8_t byte);
450int32_t e1000_read_ich8_word(struct e1000_hw *hw, uint32_t index,
451 uint16_t *data);
452int32_t e1000_read_ich8_data(struct e1000_hw *hw, uint32_t index,
453 uint32_t size, uint16_t *data);
454int32_t e1000_read_eeprom_ich8(struct e1000_hw *hw, uint16_t offset,
455 uint16_t words, uint16_t *data);
456int32_t e1000_write_eeprom_ich8(struct e1000_hw *hw, uint16_t offset,
457 uint16_t words, uint16_t *data);
458int32_t e1000_erase_ich8_4k_segment(struct e1000_hw *hw, uint32_t segment);
459 427
460 428
461#define E1000_READ_REG_IO(a, reg) \ 429#define E1000_READ_REG_IO(a, reg) \
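The e1000_hw.h hunks above remove the prototypes for routines that the matching e1000_hw.c change either marks static or compiles out, so the header only advertises what other files may still call. A minimal, self-contained sketch of the same cleanup — struct foo_hw and helper_configure() are invented names, not e1000 symbols:

#include <stdint.h>
#include <stdio.h>

struct foo_hw { int id; };

/*
 * Before the cleanup this prototype lived in foo.h; once the function is
 * only called from this file it becomes static and the header entry is
 * removed, so the compiler can warn if it ever falls out of use.
 */
static int32_t helper_configure(struct foo_hw *hw)
{
        return hw->id ? 0 : -1;
}

int main(void)
{
        struct foo_hw hw = { .id = 1 };
        printf("configure: %d\n", helper_configure(&hw));
        return 0;
}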
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 627f224d78b..726f43d5593 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -4386,11 +4386,13 @@ e1000_write_pci_cfg(struct e1000_hw *hw, uint32_t reg, uint16_t *value)
4386 pci_write_config_word(adapter->pdev, reg, *value); 4386 pci_write_config_word(adapter->pdev, reg, *value);
4387} 4387}
4388 4388
4389#if 0
4389uint32_t 4390uint32_t
4390e1000_io_read(struct e1000_hw *hw, unsigned long port) 4391e1000_io_read(struct e1000_hw *hw, unsigned long port)
4391{ 4392{
4392 return inl(port); 4393 return inl(port);
4393} 4394}
4395#endif /* 0 */
4394 4396
4395void 4397void
4396e1000_io_write(struct e1000_hw *hw, unsigned long port, uint32_t value) 4398e1000_io_write(struct e1000_hw *hw, unsigned long port, uint32_t value)
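As with e1000_duplex_reversal() earlier in e1000_hw.c, e1000_io_read() is fenced off with #if 0 rather than deleted, keeping the code in the tree for reference while taking it out of the build. A small stand-alone illustration of the pattern (io_read_stub() is a placeholder name):

#include <stdio.h>

#if 0
/* Currently unused: kept for reference, excluded from the build so the
 * compiler never sees it and no "defined but not used" warning fires. */
static unsigned int io_read_stub(unsigned long port)
{
        return 0;
}
#endif /* 0 */

int main(void)
{
        puts("the fenced-out helper is not part of this binary");
        return 0;
}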
diff --git a/drivers/net/e2100.c b/drivers/net/e2100.c
index e5c5cd2a271..e4e733a380e 100644
--- a/drivers/net/e2100.c
+++ b/drivers/net/e2100.c
@@ -425,8 +425,8 @@ MODULE_LICENSE("GPL");
425 425
426/* This is set up so that only a single autoprobe takes place per call. 426/* This is set up so that only a single autoprobe takes place per call.
427ISA device autoprobes on a running machine are not recommended. */ 427ISA device autoprobes on a running machine are not recommended. */
428int 428
429init_module(void) 429int __init init_module(void)
430{ 430{
431 struct net_device *dev; 431 struct net_device *dev;
432 int this_dev, found = 0; 432 int this_dev, found = 0;
diff --git a/drivers/net/eepro.c b/drivers/net/eepro.c
index 20d31430c74..8dc61d65dd2 100644
--- a/drivers/net/eepro.c
+++ b/drivers/net/eepro.c
@@ -1807,8 +1807,7 @@ MODULE_PARM_DESC(irq, "EtherExpress Pro/10 IRQ number(s)");
1807MODULE_PARM_DESC(mem, "EtherExpress Pro/10 Rx buffer size(es) in kB (3-29)"); 1807MODULE_PARM_DESC(mem, "EtherExpress Pro/10 Rx buffer size(es) in kB (3-29)");
1808MODULE_PARM_DESC(autodetect, "EtherExpress Pro/10 force board(s) detection (0-1)"); 1808MODULE_PARM_DESC(autodetect, "EtherExpress Pro/10 force board(s) detection (0-1)");
1809 1809
1810int 1810int __init init_module(void)
1811init_module(void)
1812{ 1811{
1813 struct net_device *dev; 1812 struct net_device *dev;
1814 int i; 1813 int i;
diff --git a/drivers/net/eexpress.c b/drivers/net/eexpress.c
index 33291bcf6d4..0701c1d810c 100644
--- a/drivers/net/eexpress.c
+++ b/drivers/net/eexpress.c
@@ -1698,7 +1698,7 @@ MODULE_LICENSE("GPL");
1698 * are specified, we verify and then use them. If no parameters are given, we 1698 * are specified, we verify and then use them. If no parameters are given, we
1699 * autoprobe for one card only. 1699 * autoprobe for one card only.
1700 */ 1700 */
1701int init_module(void) 1701int __init init_module(void)
1702{ 1702{
1703 struct net_device *dev; 1703 struct net_device *dev;
1704 int this_dev, found = 0; 1704 int this_dev, found = 0;
diff --git a/drivers/net/es3210.c b/drivers/net/es3210.c
index 6b0ab1eac3f..fd7b32a24ea 100644
--- a/drivers/net/es3210.c
+++ b/drivers/net/es3210.c
@@ -421,8 +421,7 @@ MODULE_PARM_DESC(mem, "memory base address(es)");
421MODULE_DESCRIPTION("Racal-Interlan ES3210 EISA ethernet driver"); 421MODULE_DESCRIPTION("Racal-Interlan ES3210 EISA ethernet driver");
422MODULE_LICENSE("GPL"); 422MODULE_LICENSE("GPL");
423 423
424int 424int __init init_module(void)
425init_module(void)
426{ 425{
427 struct net_device *dev; 426 struct net_device *dev;
428 int this_dev, found = 0; 427 int this_dev, found = 0;
diff --git a/drivers/net/eth16i.c b/drivers/net/eth16i.c
index 4bf76f86d8e..ca42efa9143 100644
--- a/drivers/net/eth16i.c
+++ b/drivers/net/eth16i.c
@@ -1434,7 +1434,7 @@ MODULE_PARM_DESC(mediatype, "eth16i media type of interface(s) (bnc,tp,dix,auto,
1434module_param(debug, int, 0); 1434module_param(debug, int, 0);
1435MODULE_PARM_DESC(debug, "eth16i debug level (0-6)"); 1435MODULE_PARM_DESC(debug, "eth16i debug level (0-6)");
1436 1436
1437int init_module(void) 1437int __init init_module(void)
1438{ 1438{
1439 int this_dev, found = 0; 1439 int this_dev, found = 0;
1440 struct net_device *dev; 1440 struct net_device *dev;
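The e2100, eepro, eexpress, es3210, and eth16i hunks all make the same change: the old-style init_module() entry point gains an __init annotation so its code can be discarded once loading completes. A minimal skeleton using those legacy entry points might look like the sketch below (module text is illustrative only):

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>

/* Old-style entry points: the kernel calls init_module()/cleanup_module()
 * directly when no module_init()/module_exit() wrappers are used.  __init
 * lets the init code be freed after loading completes. */
int __init init_module(void)
{
        printk(KERN_INFO "example: loaded\n");
        return 0;
}

void cleanup_module(void)
{
        printk(KERN_INFO "example: unloaded\n");
}

MODULE_LICENSE("GPL");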
diff --git a/drivers/net/fealnx.c b/drivers/net/fealnx.c
index 97d34fee8c1..567e27413cf 100644
--- a/drivers/net/fealnx.c
+++ b/drivers/net/fealnx.c
@@ -92,7 +92,7 @@ static int full_duplex[MAX_UNITS] = { -1, -1, -1, -1, -1, -1, -1, -1 };
92#include <asm/uaccess.h> 92#include <asm/uaccess.h>
93 93
94/* These identify the driver base version and may not be removed. */ 94/* These identify the driver base version and may not be removed. */
95static char version[] __devinitdata = 95static char version[] =
96KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE "\n"; 96KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE "\n";
97 97
98 98
diff --git a/drivers/net/fs_enet/Makefile b/drivers/net/fs_enet/Makefile
index d6dd3f2fb43..02d4dc18ba6 100644
--- a/drivers/net/fs_enet/Makefile
+++ b/drivers/net/fs_enet/Makefile
@@ -4,7 +4,7 @@
4 4
5obj-$(CONFIG_FS_ENET) += fs_enet.o 5obj-$(CONFIG_FS_ENET) += fs_enet.o
6 6
7obj-$(CONFIG_8xx) += mac-fec.o mac-scc.o 7obj-$(CONFIG_8xx) += mac-fec.o mac-scc.o mii-fec.o
8obj-$(CONFIG_8260) += mac-fcc.o 8obj-$(CONFIG_CPM2) += mac-fcc.o mii-bitbang.o
9 9
10fs_enet-objs := fs_enet-main.o fs_enet-mii.o mii-bitbang.o mii-fixed.o 10fs_enet-objs := fs_enet-main.o
diff --git a/drivers/net/fs_enet/fec.h b/drivers/net/fs_enet/fec.h
new file mode 100644
index 00000000000..e980527e2b9
--- /dev/null
+++ b/drivers/net/fs_enet/fec.h
@@ -0,0 +1,42 @@
1#ifndef FS_ENET_FEC_H
2#define FS_ENET_FEC_H
3
4/* CRC polynomium used by the FEC for the multicast group filtering */
5#define FEC_CRC_POLY 0x04C11DB7
6
7#define FEC_MAX_MULTICAST_ADDRS 64
8
9/* Interrupt events/masks.
10*/
11#define FEC_ENET_HBERR 0x80000000U /* Heartbeat error */
12#define FEC_ENET_BABR 0x40000000U /* Babbling receiver */
13#define FEC_ENET_BABT 0x20000000U /* Babbling transmitter */
14#define FEC_ENET_GRA 0x10000000U /* Graceful stop complete */
15#define FEC_ENET_TXF 0x08000000U /* Full frame transmitted */
16#define FEC_ENET_TXB 0x04000000U /* A buffer was transmitted */
17#define FEC_ENET_RXF 0x02000000U /* Full frame received */
18#define FEC_ENET_RXB 0x01000000U /* A buffer was received */
19#define FEC_ENET_MII 0x00800000U /* MII interrupt */
20#define FEC_ENET_EBERR 0x00400000U /* SDMA bus error */
21
22#define FEC_ECNTRL_PINMUX 0x00000004
23#define FEC_ECNTRL_ETHER_EN 0x00000002
24#define FEC_ECNTRL_RESET 0x00000001
25
26#define FEC_RCNTRL_BC_REJ 0x00000010
27#define FEC_RCNTRL_PROM 0x00000008
28#define FEC_RCNTRL_MII_MODE 0x00000004
29#define FEC_RCNTRL_DRT 0x00000002
30#define FEC_RCNTRL_LOOP 0x00000001
31
32#define FEC_TCNTRL_FDEN 0x00000004
33#define FEC_TCNTRL_HBC 0x00000002
34#define FEC_TCNTRL_GTS 0x00000001
35
36
37
38/*
39 * Delay to wait for FEC reset command to complete (in us)
40 */
41#define FEC_RESET_DELAY 50
42#endif
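The new fec.h above only collects register bit definitions that previously lived in mac-fec.c; the interrupt-event masks are meant to be OR-ed together and tested against the FEC's ievent register. A trivial host-side illustration of how such masks compose (the pending value is invented):

#include <stdio.h>

#define FEC_ENET_TXF 0x08000000U /* Full frame transmitted */
#define FEC_ENET_RXF 0x02000000U /* Full frame received */
#define FEC_ENET_MII 0x00800000U /* MII interrupt */

int main(void)
{
        unsigned int pending = FEC_ENET_RXF | FEC_ENET_MII; /* stand-in for an ievent read */

        if (pending & FEC_ENET_RXF)
                puts("rx frame event");
        if (pending & FEC_ENET_TXF)
                puts("tx frame event");
        if (pending & FEC_ENET_MII)
                puts("mii event");
        return 0;
}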
diff --git a/drivers/net/fs_enet/fs_enet-main.c b/drivers/net/fs_enet/fs_enet-main.c
index f6abff5846b..df62506a178 100644
--- a/drivers/net/fs_enet/fs_enet-main.c
+++ b/drivers/net/fs_enet/fs_enet-main.c
@@ -37,6 +37,7 @@
37#include <linux/bitops.h> 37#include <linux/bitops.h>
38#include <linux/fs.h> 38#include <linux/fs.h>
39#include <linux/platform_device.h> 39#include <linux/platform_device.h>
40#include <linux/phy.h>
40 41
41#include <linux/vmalloc.h> 42#include <linux/vmalloc.h>
42#include <asm/pgtable.h> 43#include <asm/pgtable.h>
@@ -682,35 +683,6 @@ static void fs_free_irq(struct net_device *dev, int irq)
682 (*fep->ops->post_free_irq)(dev, irq); 683 (*fep->ops->post_free_irq)(dev, irq);
683} 684}
684 685
685/**********************************************************************************/
686
687/* This interrupt occurs when the PHY detects a link change. */
688static irqreturn_t
689fs_mii_link_interrupt(int irq, void *dev_id, struct pt_regs *regs)
690{
691 struct net_device *dev = dev_id;
692 struct fs_enet_private *fep;
693 const struct fs_platform_info *fpi;
694
695 fep = netdev_priv(dev);
696 fpi = fep->fpi;
697
698 /*
699 * Acknowledge the interrupt if possible. If we have not
700 * found the PHY yet we can't process or acknowledge the
701 * interrupt now. Instead we ignore this interrupt for now,
702 * which we can do since it is edge triggered. It will be
703 * acknowledged later by fs_enet_open().
704 */
705 if (!fep->phy)
706 return IRQ_NONE;
707
708 fs_mii_ack_int(dev);
709 fs_mii_link_status_change_check(dev, 0);
710
711 return IRQ_HANDLED;
712}
713
714static void fs_timeout(struct net_device *dev) 686static void fs_timeout(struct net_device *dev)
715{ 687{
716 struct fs_enet_private *fep = netdev_priv(dev); 688 struct fs_enet_private *fep = netdev_priv(dev);
@@ -722,10 +694,13 @@ static void fs_timeout(struct net_device *dev)
722 spin_lock_irqsave(&fep->lock, flags); 694 spin_lock_irqsave(&fep->lock, flags);
723 695
724 if (dev->flags & IFF_UP) { 696 if (dev->flags & IFF_UP) {
697 phy_stop(fep->phydev);
725 (*fep->ops->stop)(dev); 698 (*fep->ops->stop)(dev);
726 (*fep->ops->restart)(dev); 699 (*fep->ops->restart)(dev);
700 phy_start(fep->phydev);
727 } 701 }
728 702
703 phy_start(fep->phydev);
729 wake = fep->tx_free && !(CBDR_SC(fep->cur_tx) & BD_ENET_TX_READY); 704 wake = fep->tx_free && !(CBDR_SC(fep->cur_tx) & BD_ENET_TX_READY);
730 spin_unlock_irqrestore(&fep->lock, flags); 705 spin_unlock_irqrestore(&fep->lock, flags);
731 706
@@ -733,35 +708,112 @@ static void fs_timeout(struct net_device *dev)
733 netif_wake_queue(dev); 708 netif_wake_queue(dev);
734} 709}
735 710
711/*-----------------------------------------------------------------------------
712 * generic link-change handler - should be sufficient for most cases
713 *-----------------------------------------------------------------------------*/
714static void generic_adjust_link(struct net_device *dev)
715{
716 struct fs_enet_private *fep = netdev_priv(dev);
717 struct phy_device *phydev = fep->phydev;
718 int new_state = 0;
719
720 if (phydev->link) {
721
722 /* adjust to duplex mode */
723 if (phydev->duplex != fep->oldduplex){
724 new_state = 1;
725 fep->oldduplex = phydev->duplex;
726 }
727
728 if (phydev->speed != fep->oldspeed) {
729 new_state = 1;
730 fep->oldspeed = phydev->speed;
731 }
732
733 if (!fep->oldlink) {
734 new_state = 1;
735 fep->oldlink = 1;
736 netif_schedule(dev);
737 netif_carrier_on(dev);
738 netif_start_queue(dev);
739 }
740
741 if (new_state)
742 fep->ops->restart(dev);
743
744 } else if (fep->oldlink) {
745 new_state = 1;
746 fep->oldlink = 0;
747 fep->oldspeed = 0;
748 fep->oldduplex = -1;
749 netif_carrier_off(dev);
750 netif_stop_queue(dev);
751 }
752
753 if (new_state && netif_msg_link(fep))
754 phy_print_status(phydev);
755}
756
757
758static void fs_adjust_link(struct net_device *dev)
759{
760 struct fs_enet_private *fep = netdev_priv(dev);
761 unsigned long flags;
762
763 spin_lock_irqsave(&fep->lock, flags);
764
765 if(fep->ops->adjust_link)
766 fep->ops->adjust_link(dev);
767 else
768 generic_adjust_link(dev);
769
770 spin_unlock_irqrestore(&fep->lock, flags);
771}
772
773static int fs_init_phy(struct net_device *dev)
774{
775 struct fs_enet_private *fep = netdev_priv(dev);
776 struct phy_device *phydev;
777
778 fep->oldlink = 0;
779 fep->oldspeed = 0;
780 fep->oldduplex = -1;
781 if(fep->fpi->bus_id)
782 phydev = phy_connect(dev, fep->fpi->bus_id, &fs_adjust_link, 0);
783 else {
784 printk("No phy bus ID specified in BSP code\n");
785 return -EINVAL;
786 }
787 if (IS_ERR(phydev)) {
788 printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
789 return PTR_ERR(phydev);
790 }
791
792 fep->phydev = phydev;
793
794 return 0;
795}
796
797
736static int fs_enet_open(struct net_device *dev) 798static int fs_enet_open(struct net_device *dev)
737{ 799{
738 struct fs_enet_private *fep = netdev_priv(dev); 800 struct fs_enet_private *fep = netdev_priv(dev);
739 const struct fs_platform_info *fpi = fep->fpi;
740 int r; 801 int r;
802 int err;
741 803
742 /* Install our interrupt handler. */ 804 /* Install our interrupt handler. */
743 r = fs_request_irq(dev, fep->interrupt, "fs_enet-mac", fs_enet_interrupt); 805 r = fs_request_irq(dev, fep->interrupt, "fs_enet-mac", fs_enet_interrupt);
744 if (r != 0) { 806 if (r != 0) {
745 printk(KERN_ERR DRV_MODULE_NAME 807 printk(KERN_ERR DRV_MODULE_NAME
746 ": %s Could not allocate FEC IRQ!", dev->name); 808 ": %s Could not allocate FS_ENET IRQ!", dev->name);
747 return -EINVAL; 809 return -EINVAL;
748 } 810 }
749 811
750 /* Install our phy interrupt handler */ 812 err = fs_init_phy(dev);
751 if (fpi->phy_irq != -1) { 813 if(err)
752 814 return err;
753 r = fs_request_irq(dev, fpi->phy_irq, "fs_enet-phy", fs_mii_link_interrupt);
754 if (r != 0) {
755 printk(KERN_ERR DRV_MODULE_NAME
756 ": %s Could not allocate PHY IRQ!", dev->name);
757 fs_free_irq(dev, fep->interrupt);
758 return -EINVAL;
759 }
760 }
761 815
762 fs_mii_startup(dev); 816 phy_start(fep->phydev);
763 netif_carrier_off(dev);
764 fs_mii_link_status_change_check(dev, 1);
765 817
766 return 0; 818 return 0;
767} 819}
@@ -769,20 +821,19 @@ static int fs_enet_open(struct net_device *dev)
769static int fs_enet_close(struct net_device *dev) 821static int fs_enet_close(struct net_device *dev)
770{ 822{
771 struct fs_enet_private *fep = netdev_priv(dev); 823 struct fs_enet_private *fep = netdev_priv(dev);
772 const struct fs_platform_info *fpi = fep->fpi;
773 unsigned long flags; 824 unsigned long flags;
774 825
775 netif_stop_queue(dev); 826 netif_stop_queue(dev);
776 netif_carrier_off(dev); 827 netif_carrier_off(dev);
777 fs_mii_shutdown(dev); 828 phy_stop(fep->phydev);
778 829
779 spin_lock_irqsave(&fep->lock, flags); 830 spin_lock_irqsave(&fep->lock, flags);
780 (*fep->ops->stop)(dev); 831 (*fep->ops->stop)(dev);
781 spin_unlock_irqrestore(&fep->lock, flags); 832 spin_unlock_irqrestore(&fep->lock, flags);
782 833
783 /* release any irqs */ 834 /* release any irqs */
784 if (fpi->phy_irq != -1) 835 phy_disconnect(fep->phydev);
785 fs_free_irq(dev, fpi->phy_irq); 836 fep->phydev = NULL;
786 fs_free_irq(dev, fep->interrupt); 837 fs_free_irq(dev, fep->interrupt);
787 838
788 return 0; 839 return 0;
@@ -830,33 +881,19 @@ static void fs_get_regs(struct net_device *dev, struct ethtool_regs *regs,
830static int fs_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 881static int fs_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
831{ 882{
832 struct fs_enet_private *fep = netdev_priv(dev); 883 struct fs_enet_private *fep = netdev_priv(dev);
833 unsigned long flags; 884 return phy_ethtool_gset(fep->phydev, cmd);
834 int rc;
835
836 spin_lock_irqsave(&fep->lock, flags);
837 rc = mii_ethtool_gset(&fep->mii_if, cmd);
838 spin_unlock_irqrestore(&fep->lock, flags);
839
840 return rc;
841} 885}
842 886
843static int fs_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) 887static int fs_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
844{ 888{
845 struct fs_enet_private *fep = netdev_priv(dev); 889 struct fs_enet_private *fep = netdev_priv(dev);
846 unsigned long flags; 890 phy_ethtool_sset(fep->phydev, cmd);
847 int rc; 891 return 0;
848
849 spin_lock_irqsave(&fep->lock, flags);
850 rc = mii_ethtool_sset(&fep->mii_if, cmd);
851 spin_unlock_irqrestore(&fep->lock, flags);
852
853 return rc;
854} 892}
855 893
856static int fs_nway_reset(struct net_device *dev) 894static int fs_nway_reset(struct net_device *dev)
857{ 895{
858 struct fs_enet_private *fep = netdev_priv(dev); 896 return 0;
859 return mii_nway_restart(&fep->mii_if);
860} 897}
861 898
862static u32 fs_get_msglevel(struct net_device *dev) 899static u32 fs_get_msglevel(struct net_device *dev)
@@ -898,7 +935,7 @@ static int fs_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
898 return -EINVAL; 935 return -EINVAL;
899 936
900 spin_lock_irqsave(&fep->lock, flags); 937 spin_lock_irqsave(&fep->lock, flags);
901 rc = generic_mii_ioctl(&fep->mii_if, mii, cmd, NULL); 938 rc = phy_mii_ioctl(fep->phydev, mii, cmd);
902 spin_unlock_irqrestore(&fep->lock, flags); 939 spin_unlock_irqrestore(&fep->lock, flags);
903 return rc; 940 return rc;
904} 941}
@@ -1030,12 +1067,6 @@ static struct net_device *fs_init_instance(struct device *dev,
1030 } 1067 }
1031 registered = 1; 1068 registered = 1;
1032 1069
1033 err = fs_mii_connect(ndev);
1034 if (err != 0) {
1035 printk(KERN_ERR DRV_MODULE_NAME
1036 ": %s fs_mii_connect failed.\n", ndev->name);
1037 goto err;
1038 }
1039 1070
1040 return ndev; 1071 return ndev;
1041 1072
@@ -1073,8 +1104,6 @@ static int fs_cleanup_instance(struct net_device *ndev)
1073 1104
1074 fpi = fep->fpi; 1105 fpi = fep->fpi;
1075 1106
1076 fs_mii_disconnect(ndev);
1077
1078 unregister_netdev(ndev); 1107 unregister_netdev(ndev);
1079 1108
1080 dma_free_coherent(fep->dev, (fpi->tx_ring + fpi->rx_ring) * sizeof(cbd_t), 1109 dma_free_coherent(fep->dev, (fpi->tx_ring + fpi->rx_ring) * sizeof(cbd_t),
@@ -1196,17 +1225,39 @@ static int __init fs_init(void)
1196 r = setup_immap(); 1225 r = setup_immap();
1197 if (r != 0) 1226 if (r != 0)
1198 return r; 1227 return r;
1199 r = driver_register(&fs_enet_fec_driver); 1228
1229#ifdef CONFIG_FS_ENET_HAS_FCC
1230 /* let's insert mii stuff */
1231 r = fs_enet_mdio_bb_init();
1232
1233 if (r != 0) {
1234 printk(KERN_ERR DRV_MODULE_NAME
1235 "BB PHY init failed.\n");
1236 return r;
1237 }
1238 r = driver_register(&fs_enet_fcc_driver);
1200 if (r != 0) 1239 if (r != 0)
1201 goto err; 1240 goto err;
1241#endif
1202 1242
1203 r = driver_register(&fs_enet_fcc_driver); 1243#ifdef CONFIG_FS_ENET_HAS_FEC
1244 r = fs_enet_mdio_fec_init();
1245 if (r != 0) {
1246 printk(KERN_ERR DRV_MODULE_NAME
1247 "FEC PHY init failed.\n");
1248 return r;
1249 }
1250
1251 r = driver_register(&fs_enet_fec_driver);
1204 if (r != 0) 1252 if (r != 0)
1205 goto err; 1253 goto err;
1254#endif
1206 1255
1256#ifdef CONFIG_FS_ENET_HAS_SCC
1207 r = driver_register(&fs_enet_scc_driver); 1257 r = driver_register(&fs_enet_scc_driver);
1208 if (r != 0) 1258 if (r != 0)
1209 goto err; 1259 goto err;
1260#endif
1210 1261
1211 return 0; 1262 return 0;
1212err: 1263err:
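The fs_enet-main.c hunks above swap the driver's home-grown MII handling for the phylib API: phy_connect() with an adjust_link callback at open time, phy_start()/phy_stop() around link activity, and phy_disconnect() at close. The sketch below condenses that lifecycle using the same calls and the four-argument phy_connect() seen in this patch (later kernels changed the signature); the sketch_* names, the "0:01" bus id string and the private-data layout are illustrative only:

#include <linux/err.h>
#include <linux/netdevice.h>
#include <linux/phy.h>

struct sketch_priv {
        struct phy_device *phydev;
        int oldlink;
};

/* phylib invokes this whenever the negotiated link state changes. */
static void sketch_adjust_link(struct net_device *dev)
{
        struct sketch_priv *priv = netdev_priv(dev);
        struct phy_device *phydev = priv->phydev;

        if (phydev->link && !priv->oldlink) {
                priv->oldlink = 1;
                netif_carrier_on(dev);
        } else if (!phydev->link && priv->oldlink) {
                priv->oldlink = 0;
                netif_carrier_off(dev);
        }
}

static int sketch_open(struct net_device *dev)
{
        struct sketch_priv *priv = netdev_priv(dev);
        struct phy_device *phydev;

        /* the bus id string comes from platform data in the real driver */
        phydev = phy_connect(dev, "0:01", &sketch_adjust_link, 0);
        if (IS_ERR(phydev))
                return PTR_ERR(phydev);
        priv->phydev = phydev;

        phy_start(priv->phydev);        /* kick off the PHY state machine */
        return 0;
}

static int sketch_close(struct net_device *dev)
{
        struct sketch_priv *priv = netdev_priv(dev);

        phy_stop(priv->phydev);
        phy_disconnect(priv->phydev);
        priv->phydev = NULL;
        return 0;
}
/* In a real driver sketch_open/sketch_close would be wired into the
 * net_device open/stop methods, as fs_enet_open/fs_enet_close are above. */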
diff --git a/drivers/net/fs_enet/fs_enet-mii.c b/drivers/net/fs_enet/fs_enet-mii.c
deleted file mode 100644
index b7e6e21725c..00000000000
--- a/drivers/net/fs_enet/fs_enet-mii.c
+++ /dev/null
@@ -1,505 +0,0 @@
1/*
2 * Combined Ethernet driver for Motorola MPC8xx and MPC82xx.
3 *
4 * Copyright (c) 2003 Intracom S.A.
5 * by Pantelis Antoniou <panto@intracom.gr>
6 *
7 * 2005 (c) MontaVista Software, Inc.
8 * Vitaly Bordug <vbordug@ru.mvista.com>
9 *
10 * Heavily based on original FEC driver by Dan Malek <dan@embeddededge.com>
11 * and modifications by Joakim Tjernlund <joakim.tjernlund@lumentis.se>
12 *
13 * This file is licensed under the terms of the GNU General Public License
14 * version 2. This program is licensed "as is" without any warranty of any
15 * kind, whether express or implied.
16 */
17
18
19#include <linux/module.h>
20#include <linux/types.h>
21#include <linux/kernel.h>
22#include <linux/sched.h>
23#include <linux/string.h>
24#include <linux/ptrace.h>
25#include <linux/errno.h>
26#include <linux/ioport.h>
27#include <linux/slab.h>
28#include <linux/interrupt.h>
29#include <linux/pci.h>
30#include <linux/init.h>
31#include <linux/delay.h>
32#include <linux/netdevice.h>
33#include <linux/etherdevice.h>
34#include <linux/skbuff.h>
35#include <linux/spinlock.h>
36#include <linux/mii.h>
37#include <linux/ethtool.h>
38#include <linux/bitops.h>
39
40#include <asm/pgtable.h>
41#include <asm/irq.h>
42#include <asm/uaccess.h>
43
44#include "fs_enet.h"
45
46/*************************************************/
47
48/*
49 * Generic PHY support.
50 * Should work for all PHYs, but link change is detected by polling
51 */
52
53static void generic_timer_callback(unsigned long data)
54{
55 struct net_device *dev = (struct net_device *)data;
56 struct fs_enet_private *fep = netdev_priv(dev);
57
58 fep->phy_timer_list.expires = jiffies + HZ / 2;
59
60 add_timer(&fep->phy_timer_list);
61
62 fs_mii_link_status_change_check(dev, 0);
63}
64
65static void generic_startup(struct net_device *dev)
66{
67 struct fs_enet_private *fep = netdev_priv(dev);
68
69 fep->phy_timer_list.expires = jiffies + HZ / 2; /* every 500ms */
70 fep->phy_timer_list.data = (unsigned long)dev;
71 fep->phy_timer_list.function = generic_timer_callback;
72 add_timer(&fep->phy_timer_list);
73}
74
75static void generic_shutdown(struct net_device *dev)
76{
77 struct fs_enet_private *fep = netdev_priv(dev);
78
79 del_timer_sync(&fep->phy_timer_list);
80}
81
82/* ------------------------------------------------------------------------- */
83/* The Davicom DM9161 is used on the NETTA board */
84
85/* register definitions */
86
87#define MII_DM9161_ANAR 4 /* Aux. Config Register */
88#define MII_DM9161_ACR 16 /* Aux. Config Register */
89#define MII_DM9161_ACSR 17 /* Aux. Config/Status Register */
90#define MII_DM9161_10TCSR 18 /* 10BaseT Config/Status Reg. */
91#define MII_DM9161_INTR 21 /* Interrupt Register */
92#define MII_DM9161_RECR 22 /* Receive Error Counter Reg. */
93#define MII_DM9161_DISCR 23 /* Disconnect Counter Register */
94
95static void dm9161_startup(struct net_device *dev)
96{
97 struct fs_enet_private *fep = netdev_priv(dev);
98
99 fs_mii_write(dev, fep->mii_if.phy_id, MII_DM9161_INTR, 0x0000);
100 /* Start autonegotiation */
101 fs_mii_write(dev, fep->mii_if.phy_id, MII_BMCR, 0x1200);
102
103 set_current_state(TASK_UNINTERRUPTIBLE);
104 schedule_timeout(HZ*8);
105}
106
107static void dm9161_ack_int(struct net_device *dev)
108{
109 struct fs_enet_private *fep = netdev_priv(dev);
110
111 fs_mii_read(dev, fep->mii_if.phy_id, MII_DM9161_INTR);
112}
113
114static void dm9161_shutdown(struct net_device *dev)
115{
116 struct fs_enet_private *fep = netdev_priv(dev);
117
118 fs_mii_write(dev, fep->mii_if.phy_id, MII_DM9161_INTR, 0x0f00);
119}
120
121/**********************************************************************************/
122
123static const struct phy_info phy_info[] = {
124 {
125 .id = 0x00181b88,
126 .name = "DM9161",
127 .startup = dm9161_startup,
128 .ack_int = dm9161_ack_int,
129 .shutdown = dm9161_shutdown,
130 }, {
131 .id = 0,
132 .name = "GENERIC",
133 .startup = generic_startup,
134 .shutdown = generic_shutdown,
135 },
136};
137
138/**********************************************************************************/
139
140static int phy_id_detect(struct net_device *dev)
141{
142 struct fs_enet_private *fep = netdev_priv(dev);
143 const struct fs_platform_info *fpi = fep->fpi;
144 struct fs_enet_mii_bus *bus = fep->mii_bus;
145 int i, r, start, end, phytype, physubtype;
146 const struct phy_info *phy;
147 int phy_hwid, phy_id;
148
149 phy_hwid = -1;
150 fep->phy = NULL;
151
152 /* auto-detect? */
153 if (fpi->phy_addr == -1) {
154 start = 1;
155 end = 32;
156 } else { /* direct */
157 start = fpi->phy_addr;
158 end = start + 1;
159 }
160
161 for (phy_id = start; phy_id < end; phy_id++) {
162 /* skip already used phy addresses on this bus */
163 if (bus->usage_map & (1 << phy_id))
164 continue;
165 r = fs_mii_read(dev, phy_id, MII_PHYSID1);
166 if (r == -1 || (phytype = (r & 0xffff)) == 0xffff)
167 continue;
168 r = fs_mii_read(dev, phy_id, MII_PHYSID2);
169 if (r == -1 || (physubtype = (r & 0xffff)) == 0xffff)
170 continue;
171 phy_hwid = (phytype << 16) | physubtype;
172 if (phy_hwid != -1)
173 break;
174 }
175
176 if (phy_hwid == -1) {
177 printk(KERN_ERR DRV_MODULE_NAME
178 ": %s No PHY detected! range=0x%02x-0x%02x\n",
179 dev->name, start, end);
180 return -1;
181 }
182
183 for (i = 0, phy = phy_info; i < ARRAY_SIZE(phy_info); i++, phy++)
184 if (phy->id == (phy_hwid >> 4) || phy->id == 0)
185 break;
186
187 if (i >= ARRAY_SIZE(phy_info)) {
188 printk(KERN_ERR DRV_MODULE_NAME
189 ": %s PHY id 0x%08x is not supported!\n",
190 dev->name, phy_hwid);
191 return -1;
192 }
193
194 fep->phy = phy;
195
196 /* mark this address as used */
197 bus->usage_map |= (1 << phy_id);
198
199 printk(KERN_INFO DRV_MODULE_NAME
200 ": %s Phy @ 0x%x, type %s (0x%08x)%s\n",
201 dev->name, phy_id, fep->phy->name, phy_hwid,
202 fpi->phy_addr == -1 ? " (auto-detected)" : "");
203
204 return phy_id;
205}
206
207void fs_mii_startup(struct net_device *dev)
208{
209 struct fs_enet_private *fep = netdev_priv(dev);
210
211 if (fep->phy->startup)
212 (*fep->phy->startup) (dev);
213}
214
215void fs_mii_shutdown(struct net_device *dev)
216{
217 struct fs_enet_private *fep = netdev_priv(dev);
218
219 if (fep->phy->shutdown)
220 (*fep->phy->shutdown) (dev);
221}
222
223void fs_mii_ack_int(struct net_device *dev)
224{
225 struct fs_enet_private *fep = netdev_priv(dev);
226
227 if (fep->phy->ack_int)
228 (*fep->phy->ack_int) (dev);
229}
230
231#define MII_LINK 0x0001
232#define MII_HALF 0x0002
233#define MII_FULL 0x0004
234#define MII_BASE4 0x0008
235#define MII_10M 0x0010
236#define MII_100M 0x0020
237#define MII_1G 0x0040
238#define MII_10G 0x0080
239
240/* return full mii info at one gulp, with a usable form */
241static unsigned int mii_full_status(struct mii_if_info *mii)
242{
243 unsigned int status;
244 int bmsr, adv, lpa, neg;
245 struct fs_enet_private* fep = netdev_priv(mii->dev);
246
247 /* first, a dummy read, needed to latch some MII phys */
248 (void)mii->mdio_read(mii->dev, mii->phy_id, MII_BMSR);
249 bmsr = mii->mdio_read(mii->dev, mii->phy_id, MII_BMSR);
250
251 /* no link */
252 if ((bmsr & BMSR_LSTATUS) == 0)
253 return 0;
254
255 status = MII_LINK;
256
257 /* Lets look what ANEG says if it's supported - otherwize we shall
258 take the right values from the platform info*/
259 if(!mii->force_media) {
260 /* autoneg not completed; don't bother */
261 if ((bmsr & BMSR_ANEGCOMPLETE) == 0)
262 return 0;
263
264 adv = (*mii->mdio_read)(mii->dev, mii->phy_id, MII_ADVERTISE);
265 lpa = (*mii->mdio_read)(mii->dev, mii->phy_id, MII_LPA);
266
267 neg = lpa & adv;
268 } else {
269 neg = fep->fpi->bus_info->lpa;
270 }
271
272 if (neg & LPA_100FULL)
273 status |= MII_FULL | MII_100M;
274 else if (neg & LPA_100BASE4)
275 status |= MII_FULL | MII_BASE4 | MII_100M;
276 else if (neg & LPA_100HALF)
277 status |= MII_HALF | MII_100M;
278 else if (neg & LPA_10FULL)
279 status |= MII_FULL | MII_10M;
280 else
281 status |= MII_HALF | MII_10M;
282
283 return status;
284}
285
286void fs_mii_link_status_change_check(struct net_device *dev, int init_media)
287{
288 struct fs_enet_private *fep = netdev_priv(dev);
289 struct mii_if_info *mii = &fep->mii_if;
290 unsigned int mii_status;
291 int ok_to_print, link, duplex, speed;
292 unsigned long flags;
293
294 ok_to_print = netif_msg_link(fep);
295
296 mii_status = mii_full_status(mii);
297
298 if (!init_media && mii_status == fep->last_mii_status)
299 return;
300
301 fep->last_mii_status = mii_status;
302
303 link = !!(mii_status & MII_LINK);
304 duplex = !!(mii_status & MII_FULL);
305 speed = (mii_status & MII_100M) ? 100 : 10;
306
307 if (link == 0) {
308 netif_carrier_off(mii->dev);
309 netif_stop_queue(dev);
310 if (!init_media) {
311 spin_lock_irqsave(&fep->lock, flags);
312 (*fep->ops->stop)(dev);
313 spin_unlock_irqrestore(&fep->lock, flags);
314 }
315
316 if (ok_to_print)
317 printk(KERN_INFO "%s: link down\n", mii->dev->name);
318
319 } else {
320
321 mii->full_duplex = duplex;
322
323 netif_carrier_on(mii->dev);
324
325 spin_lock_irqsave(&fep->lock, flags);
326 fep->duplex = duplex;
327 fep->speed = speed;
328 (*fep->ops->restart)(dev);
329 spin_unlock_irqrestore(&fep->lock, flags);
330
331 netif_start_queue(dev);
332
333 if (ok_to_print)
334 printk(KERN_INFO "%s: link up, %dMbps, %s-duplex\n",
335 dev->name, speed, duplex ? "full" : "half");
336 }
337}
338
339/**********************************************************************************/
340
341int fs_mii_read(struct net_device *dev, int phy_id, int location)
342{
343 struct fs_enet_private *fep = netdev_priv(dev);
344 struct fs_enet_mii_bus *bus = fep->mii_bus;
345
346 unsigned long flags;
347 int ret;
348
349 spin_lock_irqsave(&bus->mii_lock, flags);
350 ret = (*bus->mii_read)(bus, phy_id, location);
351 spin_unlock_irqrestore(&bus->mii_lock, flags);
352
353 return ret;
354}
355
356void fs_mii_write(struct net_device *dev, int phy_id, int location, int value)
357{
358 struct fs_enet_private *fep = netdev_priv(dev);
359 struct fs_enet_mii_bus *bus = fep->mii_bus;
360 unsigned long flags;
361
362 spin_lock_irqsave(&bus->mii_lock, flags);
363 (*bus->mii_write)(bus, phy_id, location, value);
364 spin_unlock_irqrestore(&bus->mii_lock, flags);
365}
366
367/*****************************************************************************/
368
369/* list of all registered mii buses */
370static LIST_HEAD(fs_mii_bus_list);
371
372static struct fs_enet_mii_bus *lookup_bus(int method, int id)
373{
374 struct list_head *ptr;
375 struct fs_enet_mii_bus *bus;
376
377 list_for_each(ptr, &fs_mii_bus_list) {
378 bus = list_entry(ptr, struct fs_enet_mii_bus, list);
379 if (bus->bus_info->method == method &&
380 bus->bus_info->id == id)
381 return bus;
382 }
383 return NULL;
384}
385
386static struct fs_enet_mii_bus *create_bus(const struct fs_mii_bus_info *bi)
387{
388 struct fs_enet_mii_bus *bus;
389 int ret = 0;
390
391 bus = kmalloc(sizeof(*bus), GFP_KERNEL);
392 if (bus == NULL) {
393 ret = -ENOMEM;
394 goto err;
395 }
396 memset(bus, 0, sizeof(*bus));
397 spin_lock_init(&bus->mii_lock);
398 bus->bus_info = bi;
399 bus->refs = 0;
400 bus->usage_map = 0;
401
402 /* perform initialization */
403 switch (bi->method) {
404
405 case fsmii_fixed:
406 ret = fs_mii_fixed_init(bus);
407 if (ret != 0)
408 goto err;
409 break;
410
411 case fsmii_bitbang:
412 ret = fs_mii_bitbang_init(bus);
413 if (ret != 0)
414 goto err;
415 break;
416#ifdef CONFIG_FS_ENET_HAS_FEC
417 case fsmii_fec:
418 ret = fs_mii_fec_init(bus);
419 if (ret != 0)
420 goto err;
421 break;
422#endif
423 default:
424 ret = -EINVAL;
425 goto err;
426 }
427
428 list_add(&bus->list, &fs_mii_bus_list);
429
430 return bus;
431
432err:
433 kfree(bus);
434 return ERR_PTR(ret);
435}
436
437static void destroy_bus(struct fs_enet_mii_bus *bus)
438{
439 /* remove from bus list */
440 list_del(&bus->list);
441
442 /* nothing more needed */
443 kfree(bus);
444}
445
446int fs_mii_connect(struct net_device *dev)
447{
448 struct fs_enet_private *fep = netdev_priv(dev);
449 const struct fs_platform_info *fpi = fep->fpi;
450 struct fs_enet_mii_bus *bus = NULL;
451
452 /* check method validity */
453 switch (fpi->bus_info->method) {
454 case fsmii_fixed:
455 case fsmii_bitbang:
456 break;
457#ifdef CONFIG_FS_ENET_HAS_FEC
458 case fsmii_fec:
459 break;
460#endif
461 default:
462 printk(KERN_ERR DRV_MODULE_NAME
463 ": %s Unknown MII bus method (%d)!\n",
464 dev->name, fpi->bus_info->method);
465 return -EINVAL;
466 }
467
468 bus = lookup_bus(fpi->bus_info->method, fpi->bus_info->id);
469
470 /* if not found create new bus */
471 if (bus == NULL) {
472 bus = create_bus(fpi->bus_info);
473 if (IS_ERR(bus)) {
474 printk(KERN_ERR DRV_MODULE_NAME
475 ": %s MII bus creation failure!\n", dev->name);
476 return PTR_ERR(bus);
477 }
478 }
479
480 bus->refs++;
481
482 fep->mii_bus = bus;
483
484 fep->mii_if.dev = dev;
485 fep->mii_if.phy_id_mask = 0x1f;
486 fep->mii_if.reg_num_mask = 0x1f;
487 fep->mii_if.mdio_read = fs_mii_read;
488 fep->mii_if.mdio_write = fs_mii_write;
489 fep->mii_if.force_media = fpi->bus_info->disable_aneg;
490 fep->mii_if.phy_id = phy_id_detect(dev);
491
492 return 0;
493}
494
495void fs_mii_disconnect(struct net_device *dev)
496{
497 struct fs_enet_private *fep = netdev_priv(dev);
498 struct fs_enet_mii_bus *bus = NULL;
499
500 bus = fep->mii_bus;
501 fep->mii_bus = NULL;
502
503 if (--bus->refs <= 0)
504 destroy_bus(bus);
505}
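With fs_enet-mii.c gone, PHY management moves to phylib and low-level MII access becomes a per-bus pair of read/write ops hung off struct mii_bus, as the new mii-bitbang.c and mii-fec.c code does further below. In the sketch the read signature matches fs_enet_mii_bb_read() shown later in this patch; the write variant is written by analogy, and struct sketch_mdio_priv with its fake register array is a stand-in for real hardware access:

#include <linux/phy.h>

/* Hypothetical bus-private state, analogous to bb_info / fec_info here. */
struct sketch_mdio_priv {
        unsigned int fake_reg[32][32];  /* [phy][reg], stand-in for real I/O */
};

/* Each MAC-specific MII access method becomes a pair of mii_bus ops; the
 * bus-private data hangs off bus->priv. */
static int sketch_mdio_read(struct mii_bus *bus, int phy_id, int location)
{
        struct sketch_mdio_priv *p = bus->priv;

        return p->fake_reg[phy_id & 0x1f][location & 0x1f];
}

static int sketch_mdio_write(struct mii_bus *bus, int phy_id, int location,
                             u16 value)
{
        struct sketch_mdio_priv *p = bus->priv;

        p->fake_reg[phy_id & 0x1f][location & 0x1f] = value;
        return 0;
}
/* The ops are then plugged into a struct mii_bus and the bus registered
 * with the MDIO layer, which is what fs_enet_mdio_bb_init() and
 * fs_enet_mdio_fec_init() do for the real hardware. */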
diff --git a/drivers/net/fs_enet/fs_enet.h b/drivers/net/fs_enet/fs_enet.h
index e7ec96c964a..95022c005f7 100644
--- a/drivers/net/fs_enet/fs_enet.h
+++ b/drivers/net/fs_enet/fs_enet.h
@@ -5,6 +5,7 @@
5#include <linux/netdevice.h> 5#include <linux/netdevice.h>
6#include <linux/types.h> 6#include <linux/types.h>
7#include <linux/list.h> 7#include <linux/list.h>
8#include <linux/phy.h>
8 9
9#include <linux/fs_enet_pd.h> 10#include <linux/fs_enet_pd.h>
10 11
@@ -12,12 +13,30 @@
12 13
13#ifdef CONFIG_CPM1 14#ifdef CONFIG_CPM1
14#include <asm/commproc.h> 15#include <asm/commproc.h>
16
17struct fec_info {
18 fec_t* fecp;
19 u32 mii_speed;
20};
15#endif 21#endif
16 22
17#ifdef CONFIG_CPM2 23#ifdef CONFIG_CPM2
18#include <asm/cpm2.h> 24#include <asm/cpm2.h>
19#endif 25#endif
20 26
27/* This is used to operate with pins.
28 Note that the actual port size may
29 be different; cpm(s) handle it OK */
30struct bb_info {
31 u8 mdio_dat_msk;
32 u8 mdio_dir_msk;
33 u8 *mdio_dir;
34 u8 *mdio_dat;
35 u8 mdc_msk;
36 u8 *mdc_dat;
37 int delay;
38};
39
21/* hw driver ops */ 40/* hw driver ops */
22struct fs_ops { 41struct fs_ops {
23 int (*setup_data)(struct net_device *dev); 42 int (*setup_data)(struct net_device *dev);
@@ -25,6 +44,7 @@ struct fs_ops {
25 void (*free_bd)(struct net_device *dev); 44 void (*free_bd)(struct net_device *dev);
26 void (*cleanup_data)(struct net_device *dev); 45 void (*cleanup_data)(struct net_device *dev);
27 void (*set_multicast_list)(struct net_device *dev); 46 void (*set_multicast_list)(struct net_device *dev);
47 void (*adjust_link)(struct net_device *dev);
28 void (*restart)(struct net_device *dev); 48 void (*restart)(struct net_device *dev);
29 void (*stop)(struct net_device *dev); 49 void (*stop)(struct net_device *dev);
30 void (*pre_request_irq)(struct net_device *dev, int irq); 50 void (*pre_request_irq)(struct net_device *dev, int irq);
@@ -100,10 +120,6 @@ struct fs_enet_mii_bus {
100 }; 120 };
101}; 121};
102 122
103int fs_mii_bitbang_init(struct fs_enet_mii_bus *bus);
104int fs_mii_fixed_init(struct fs_enet_mii_bus *bus);
105int fs_mii_fec_init(struct fs_enet_mii_bus *bus);
106
107struct fs_enet_private { 123struct fs_enet_private {
108 struct device *dev; /* pointer back to the device (must be initialized first) */ 124 struct device *dev; /* pointer back to the device (must be initialized first) */
109 spinlock_t lock; /* during all ops except TX pckt processing */ 125 spinlock_t lock; /* during all ops except TX pckt processing */
@@ -130,7 +146,8 @@ struct fs_enet_private {
130 struct fs_enet_mii_bus *mii_bus; 146 struct fs_enet_mii_bus *mii_bus;
131 int interrupt; 147 int interrupt;
132 148
133 int duplex, speed; /* current settings */ 149 struct phy_device *phydev;
150 int oldduplex, oldspeed, oldlink; /* current settings */
134 151
135 /* event masks */ 152 /* event masks */
136 u32 ev_napi_rx; /* mask of NAPI rx events */ 153 u32 ev_napi_rx; /* mask of NAPI rx events */
@@ -168,15 +185,9 @@ struct fs_enet_private {
168}; 185};
169 186
170/***************************************************************************/ 187/***************************************************************************/
171 188int fs_enet_mdio_bb_init(void);
172int fs_mii_read(struct net_device *dev, int phy_id, int location); 189int fs_mii_fixed_init(struct fs_enet_mii_bus *bus);
173void fs_mii_write(struct net_device *dev, int phy_id, int location, int value); 190int fs_enet_mdio_fec_init(void);
174
175void fs_mii_startup(struct net_device *dev);
176void fs_mii_shutdown(struct net_device *dev);
177void fs_mii_ack_int(struct net_device *dev);
178
179void fs_mii_link_status_change_check(struct net_device *dev, int init_media);
180 191
181void fs_init_bds(struct net_device *dev); 192void fs_init_bds(struct net_device *dev);
182void fs_cleanup_bds(struct net_device *dev); 193void fs_cleanup_bds(struct net_device *dev);
@@ -194,7 +205,6 @@ int fs_enet_platform_init(void);
194void fs_enet_platform_cleanup(void); 205void fs_enet_platform_cleanup(void);
195 206
196/***************************************************************************/ 207/***************************************************************************/
197
198/* buffer descriptor access macros */ 208/* buffer descriptor access macros */
199 209
200/* access macros */ 210/* access macros */
diff --git a/drivers/net/fs_enet/mac-fcc.c b/drivers/net/fs_enet/mac-fcc.c
index 64e20982c1f..1ff2597b849 100644
--- a/drivers/net/fs_enet/mac-fcc.c
+++ b/drivers/net/fs_enet/mac-fcc.c
@@ -34,6 +34,7 @@
34#include <linux/bitops.h> 34#include <linux/bitops.h>
35#include <linux/fs.h> 35#include <linux/fs.h>
36#include <linux/platform_device.h> 36#include <linux/platform_device.h>
37#include <linux/phy.h>
37 38
38#include <asm/immap_cpm2.h> 39#include <asm/immap_cpm2.h>
39#include <asm/mpc8260.h> 40#include <asm/mpc8260.h>
@@ -122,22 +123,32 @@ static int do_pd_setup(struct fs_enet_private *fep)
122 123
123 /* Attach the memory for the FCC Parameter RAM */ 124 /* Attach the memory for the FCC Parameter RAM */
124 r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fcc_pram"); 125 r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fcc_pram");
125 fep->fcc.ep = (void *)r->start; 126 fep->fcc.ep = (void *)ioremap(r->start, r->end - r->start + 1);
126
127 if (fep->fcc.ep == NULL) 127 if (fep->fcc.ep == NULL)
128 return -EINVAL; 128 return -EINVAL;
129 129
130 r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fcc_regs"); 130 r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fcc_regs");
131 fep->fcc.fccp = (void *)r->start; 131 fep->fcc.fccp = (void *)ioremap(r->start, r->end - r->start + 1);
132
133 if (fep->fcc.fccp == NULL) 132 if (fep->fcc.fccp == NULL)
134 return -EINVAL; 133 return -EINVAL;
135 134
136 fep->fcc.fcccp = (void *)fep->fpi->fcc_regs_c; 135 if (fep->fpi->fcc_regs_c) {
136
137 fep->fcc.fcccp = (void *)fep->fpi->fcc_regs_c;
138 } else {
139 r = platform_get_resource_byname(pdev, IORESOURCE_MEM,
140 "fcc_regs_c");
141 fep->fcc.fcccp = (void *)ioremap(r->start,
142 r->end - r->start + 1);
143 }
137 144
138 if (fep->fcc.fcccp == NULL) 145 if (fep->fcc.fcccp == NULL)
139 return -EINVAL; 146 return -EINVAL;
140 147
148 fep->fcc.mem = (void *)fep->fpi->mem_offset;
149 if (fep->fcc.mem == NULL)
150 return -EINVAL;
151
141 return 0; 152 return 0;
142} 153}
143 154
@@ -155,8 +166,6 @@ static int setup_data(struct net_device *dev)
155 if ((unsigned int)fep->fcc.idx >= 3) /* max 3 FCCs */ 166 if ((unsigned int)fep->fcc.idx >= 3) /* max 3 FCCs */
156 return -EINVAL; 167 return -EINVAL;
157 168
158 fep->fcc.mem = (void *)fpi->mem_offset;
159
160 if (do_pd_setup(fep) != 0) 169 if (do_pd_setup(fep) != 0)
161 return -EINVAL; 170 return -EINVAL;
162 171
@@ -394,7 +403,7 @@ static void restart(struct net_device *dev)
394 403
395 /* adjust to speed (for RMII mode) */ 404 /* adjust to speed (for RMII mode) */
396 if (fpi->use_rmii) { 405 if (fpi->use_rmii) {
397 if (fep->speed == 100) 406 if (fep->phydev->speed == 100)
398 C8(fcccp, fcc_gfemr, 0x20); 407 C8(fcccp, fcc_gfemr, 0x20);
399 else 408 else
400 S8(fcccp, fcc_gfemr, 0x20); 409 S8(fcccp, fcc_gfemr, 0x20);
@@ -420,7 +429,7 @@ static void restart(struct net_device *dev)
420 S32(fccp, fcc_fpsmr, FCC_PSMR_RMII); 429 S32(fccp, fcc_fpsmr, FCC_PSMR_RMII);
421 430
422 /* adjust to duplex mode */ 431 /* adjust to duplex mode */
423 if (fep->duplex) 432 if (fep->phydev->duplex)
424 S32(fccp, fcc_fpsmr, FCC_PSMR_FDE | FCC_PSMR_LPB); 433 S32(fccp, fcc_fpsmr, FCC_PSMR_FDE | FCC_PSMR_LPB);
425 else 434 else
426 C32(fccp, fcc_fpsmr, FCC_PSMR_FDE | FCC_PSMR_LPB); 435 C32(fccp, fcc_fpsmr, FCC_PSMR_FDE | FCC_PSMR_LPB);
@@ -486,7 +495,10 @@ static void rx_bd_done(struct net_device *dev)
486 495
487static void tx_kickstart(struct net_device *dev) 496static void tx_kickstart(struct net_device *dev)
488{ 497{
489 /* nothing */ 498 struct fs_enet_private *fep = netdev_priv(dev);
499 fcc_t *fccp = fep->fcc.fccp;
500
501 S32(fccp, fcc_ftodr, 0x80);
490} 502}
491 503
492static u32 get_int_events(struct net_device *dev) 504static u32 get_int_events(struct net_device *dev)
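do_pd_setup() in mac-fcc.c now ioremaps the fcc_pram / fcc_regs resources instead of casting their physical start addresses, so the registers are reached through a proper virtual mapping. A stripped-down sketch of that pattern — sketch_map_named() and the resource handling around it are illustrative, not code from this driver:

#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

/* Map a named MMIO resource instead of using its bus address directly;
 * mirrors the fcc_pram / fcc_regs handling above. */
static void __iomem *sketch_map_named(struct platform_device *pdev,
                                      const char *name)
{
        struct resource *r;

        r = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
        if (r == NULL)
                return NULL;

        return ioremap(r->start, r->end - r->start + 1);
}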
diff --git a/drivers/net/fs_enet/mac-fec.c b/drivers/net/fs_enet/mac-fec.c
index e0954707752..c2c5fd419bd 100644
--- a/drivers/net/fs_enet/mac-fec.c
+++ b/drivers/net/fs_enet/mac-fec.c
@@ -46,6 +46,7 @@
46#endif 46#endif
47 47
48#include "fs_enet.h" 48#include "fs_enet.h"
49#include "fec.h"
49 50
50/*************************************************/ 51/*************************************************/
51 52
@@ -75,50 +76,8 @@
75/* clear bits */ 76/* clear bits */
76#define FC(_fecp, _reg, _v) FW(_fecp, _reg, FR(_fecp, _reg) & ~(_v)) 77#define FC(_fecp, _reg, _v) FW(_fecp, _reg, FR(_fecp, _reg) & ~(_v))
77 78
78
79/* CRC polynomium used by the FEC for the multicast group filtering */
80#define FEC_CRC_POLY 0x04C11DB7
81
82#define FEC_MAX_MULTICAST_ADDRS 64
83
84/* Interrupt events/masks.
85*/
86#define FEC_ENET_HBERR 0x80000000U /* Heartbeat error */
87#define FEC_ENET_BABR 0x40000000U /* Babbling receiver */
88#define FEC_ENET_BABT 0x20000000U /* Babbling transmitter */
89#define FEC_ENET_GRA 0x10000000U /* Graceful stop complete */
90#define FEC_ENET_TXF 0x08000000U /* Full frame transmitted */
91#define FEC_ENET_TXB 0x04000000U /* A buffer was transmitted */
92#define FEC_ENET_RXF 0x02000000U /* Full frame received */
93#define FEC_ENET_RXB 0x01000000U /* A buffer was received */
94#define FEC_ENET_MII 0x00800000U /* MII interrupt */
95#define FEC_ENET_EBERR 0x00400000U /* SDMA bus error */
96
97#define FEC_ECNTRL_PINMUX 0x00000004
98#define FEC_ECNTRL_ETHER_EN 0x00000002
99#define FEC_ECNTRL_RESET 0x00000001
100
101#define FEC_RCNTRL_BC_REJ 0x00000010
102#define FEC_RCNTRL_PROM 0x00000008
103#define FEC_RCNTRL_MII_MODE 0x00000004
104#define FEC_RCNTRL_DRT 0x00000002
105#define FEC_RCNTRL_LOOP 0x00000001
106
107#define FEC_TCNTRL_FDEN 0x00000004
108#define FEC_TCNTRL_HBC 0x00000002
109#define FEC_TCNTRL_GTS 0x00000001
110
111
112/* Make MII read/write commands for the FEC.
113*/
114#define mk_mii_read(REG) (0x60020000 | ((REG & 0x1f) << 18))
115#define mk_mii_write(REG, VAL) (0x50020000 | ((REG & 0x1f) << 18) | (VAL & 0xffff))
116#define mk_mii_end 0
117
118#define FEC_MII_LOOPS 10000
119
120/* 79/*
121 * Delay to wait for FEC reset command to complete (in us) 80 * Delay to wait for FEC reset command to complete (in us)
122 */ 81 */
123#define FEC_RESET_DELAY 50 82#define FEC_RESET_DELAY 50
124 83
@@ -303,13 +262,15 @@ static void restart(struct net_device *dev)
303 int r; 262 int r;
304 u32 addrhi, addrlo; 263 u32 addrhi, addrlo;
305 264
265 struct mii_bus* mii = fep->phydev->bus;
266 struct fec_info* fec_inf = mii->priv;
267
306 r = whack_reset(fep->fec.fecp); 268 r = whack_reset(fep->fec.fecp);
307 if (r != 0) 269 if (r != 0)
308 printk(KERN_ERR DRV_MODULE_NAME 270 printk(KERN_ERR DRV_MODULE_NAME
309 ": %s FEC Reset FAILED!\n", dev->name); 271 ": %s FEC Reset FAILED!\n", dev->name);
310
311 /* 272 /*
312 * Set station address. 273 * Set station address.
313 */ 274 */
314 addrhi = ((u32) dev->dev_addr[0] << 24) | 275 addrhi = ((u32) dev->dev_addr[0] << 24) |
315 ((u32) dev->dev_addr[1] << 16) | 276 ((u32) dev->dev_addr[1] << 16) |
@@ -350,12 +311,12 @@ static void restart(struct net_device *dev)
350 FW(fecp, fun_code, 0x78000000); 311 FW(fecp, fun_code, 0x78000000);
351 312
352 /* 313 /*
353 * Set MII speed. 314 * Set MII speed.
354 */ 315 */
355 FW(fecp, mii_speed, fep->mii_bus->fec.mii_speed); 316 FW(fecp, mii_speed, fec_inf->mii_speed);
356 317
357 /* 318 /*
358 * Clear any outstanding interrupt. 319 * Clear any outstanding interrupt.
359 */ 320 */
360 FW(fecp, ievent, 0xffc0); 321 FW(fecp, ievent, 0xffc0);
361 FW(fecp, ivec, (fep->interrupt / 2) << 29); 322 FW(fecp, ivec, (fep->interrupt / 2) << 29);
@@ -390,11 +351,12 @@ static void restart(struct net_device *dev)
390 } 351 }
391#endif 352#endif
392 353
354
393 FW(fecp, r_cntrl, FEC_RCNTRL_MII_MODE); /* MII enable */ 355 FW(fecp, r_cntrl, FEC_RCNTRL_MII_MODE); /* MII enable */
394 /* 356 /*
395 * adjust to duplex mode 357 * adjust to duplex mode
396 */ 358 */
397 if (fep->duplex) { 359 if (fep->phydev->duplex) {
398 FC(fecp, r_cntrl, FEC_RCNTRL_DRT); 360 FC(fecp, r_cntrl, FEC_RCNTRL_DRT);
399 FS(fecp, x_cntrl, FEC_TCNTRL_FDEN); /* FD enable */ 361 FS(fecp, x_cntrl, FEC_TCNTRL_FDEN); /* FD enable */
400 } else { 362 } else {
@@ -418,9 +380,11 @@ static void restart(struct net_device *dev)
418static void stop(struct net_device *dev) 380static void stop(struct net_device *dev)
419{ 381{
420 struct fs_enet_private *fep = netdev_priv(dev); 382 struct fs_enet_private *fep = netdev_priv(dev);
383 const struct fs_platform_info *fpi = fep->fpi;
421 fec_t *fecp = fep->fec.fecp; 384 fec_t *fecp = fep->fec.fecp;
422 struct fs_enet_mii_bus *bus = fep->mii_bus; 385
423 const struct fs_mii_bus_info *bi = bus->bus_info; 386 struct fec_info* feci= fep->phydev->bus->priv;
387
424 int i; 388 int i;
425 389
426 if ((FR(fecp, ecntrl) & FEC_ECNTRL_ETHER_EN) == 0) 390 if ((FR(fecp, ecntrl) & FEC_ECNTRL_ETHER_EN) == 0)
@@ -444,11 +408,11 @@ static void stop(struct net_device *dev)
444 fs_cleanup_bds(dev); 408 fs_cleanup_bds(dev);
445 409
446 /* shut down FEC1? that's where the mii bus is */ 410 /* shut down FEC1? that's where the mii bus is */
447 if (fep->fec.idx == 0 && bus->refs > 1 && bi->method == fsmii_fec) { 411 if (fpi->has_phy) {
448 FS(fecp, r_cntrl, FEC_RCNTRL_MII_MODE); /* MII enable */ 412 FS(fecp, r_cntrl, FEC_RCNTRL_MII_MODE); /* MII enable */
449 FS(fecp, ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_ETHER_EN); 413 FS(fecp, ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_ETHER_EN);
450 FW(fecp, ievent, FEC_ENET_MII); 414 FW(fecp, ievent, FEC_ENET_MII);
451 FW(fecp, mii_speed, bus->fec.mii_speed); 415 FW(fecp, mii_speed, feci->mii_speed);
452 } 416 }
453} 417}
454 418
@@ -583,73 +547,3 @@ const struct fs_ops fs_fec_ops = {
583 .free_bd = free_bd, 547 .free_bd = free_bd,
584}; 548};
585 549
586/***********************************************************************/
587
588static int mii_read(struct fs_enet_mii_bus *bus, int phy_id, int location)
589{
590 fec_t *fecp = bus->fec.fecp;
591 int i, ret = -1;
592
593 if ((FR(fecp, r_cntrl) & FEC_RCNTRL_MII_MODE) == 0)
594 BUG();
595
596 /* Add PHY address to register command. */
597 FW(fecp, mii_data, (phy_id << 23) | mk_mii_read(location));
598
599 for (i = 0; i < FEC_MII_LOOPS; i++)
600 if ((FR(fecp, ievent) & FEC_ENET_MII) != 0)
601 break;
602
603 if (i < FEC_MII_LOOPS) {
604 FW(fecp, ievent, FEC_ENET_MII);
605 ret = FR(fecp, mii_data) & 0xffff;
606 }
607
608 return ret;
609}
610
611static void mii_write(struct fs_enet_mii_bus *bus, int phy_id, int location, int value)
612{
613 fec_t *fecp = bus->fec.fecp;
614 int i;
615
616 /* this must never happen */
617 if ((FR(fecp, r_cntrl) & FEC_RCNTRL_MII_MODE) == 0)
618 BUG();
619
620 /* Add PHY address to register command. */
621 FW(fecp, mii_data, (phy_id << 23) | mk_mii_write(location, value));
622
623 for (i = 0; i < FEC_MII_LOOPS; i++)
624 if ((FR(fecp, ievent) & FEC_ENET_MII) != 0)
625 break;
626
627 if (i < FEC_MII_LOOPS)
628 FW(fecp, ievent, FEC_ENET_MII);
629}
630
631int fs_mii_fec_init(struct fs_enet_mii_bus *bus)
632{
633 bd_t *bd = (bd_t *)__res;
634 const struct fs_mii_bus_info *bi = bus->bus_info;
635 fec_t *fecp;
636
637 if (bi->id != 0)
638 return -1;
639
640 bus->fec.fecp = &((immap_t *)fs_enet_immap)->im_cpm.cp_fec;
641 bus->fec.mii_speed = ((((bd->bi_intfreq + 4999999) / 2500000) / 2)
642 & 0x3F) << 1;
643
644 fecp = bus->fec.fecp;
645
646 FS(fecp, r_cntrl, FEC_RCNTRL_MII_MODE); /* MII enable */
647 FS(fecp, ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_ETHER_EN);
648 FW(fecp, ievent, FEC_ENET_MII);
649 FW(fecp, mii_speed, bus->fec.mii_speed);
650
651 bus->mii_read = mii_read;
652 bus->mii_write = mii_write;
653
654 return 0;
655}
diff --git a/drivers/net/fs_enet/mac-scc.c b/drivers/net/fs_enet/mac-scc.c
index eaa24fab645..95ec5872c50 100644
--- a/drivers/net/fs_enet/mac-scc.c
+++ b/drivers/net/fs_enet/mac-scc.c
@@ -369,7 +369,7 @@ static void restart(struct net_device *dev)
369 W16(sccp, scc_psmr, SCC_PSMR_ENCRC | SCC_PSMR_NIB22); 369 W16(sccp, scc_psmr, SCC_PSMR_ENCRC | SCC_PSMR_NIB22);
370 370
371 /* Set full duplex mode if needed */ 371 /* Set full duplex mode if needed */
372 if (fep->duplex) 372 if (fep->phydev->duplex)
373 S16(sccp, scc_psmr, SCC_PSMR_LPB | SCC_PSMR_FDE); 373 S16(sccp, scc_psmr, SCC_PSMR_LPB | SCC_PSMR_FDE);
374 374
375 S32(sccp, scc_gsmrl, SCC_GSMRL_ENR | SCC_GSMRL_ENT); 375 S32(sccp, scc_gsmrl, SCC_GSMRL_ENR | SCC_GSMRL_ENT);
@@ -500,6 +500,8 @@ static void tx_restart(struct net_device *dev)
500 scc_cr_cmd(fep, CPM_CR_RESTART_TX); 500 scc_cr_cmd(fep, CPM_CR_RESTART_TX);
501} 501}
502 502
503
504
503/*************************************************************************/ 505/*************************************************************************/
504 506
505const struct fs_ops fs_scc_ops = { 507const struct fs_ops fs_scc_ops = {
diff --git a/drivers/net/fs_enet/mii-bitbang.c b/drivers/net/fs_enet/mii-bitbang.c
index 48f9cf83ab6..0b9b8b5c847 100644
--- a/drivers/net/fs_enet/mii-bitbang.c
+++ b/drivers/net/fs_enet/mii-bitbang.c
@@ -33,6 +33,7 @@
33#include <linux/mii.h> 33#include <linux/mii.h>
34#include <linux/ethtool.h> 34#include <linux/ethtool.h>
35#include <linux/bitops.h> 35#include <linux/bitops.h>
36#include <linux/platform_device.h>
36 37
37#include <asm/pgtable.h> 38#include <asm/pgtable.h>
38#include <asm/irq.h> 39#include <asm/irq.h>
@@ -40,129 +41,25 @@
40 41
41#include "fs_enet.h" 42#include "fs_enet.h"
42 43
43#ifdef CONFIG_8xx 44static int bitbang_prep_bit(u8 **datp, u8 *mskp,
44static int bitbang_prep_bit(u8 **dirp, u8 **datp, u8 *mskp, int port, int bit) 45 struct fs_mii_bit *mii_bit)
45{ 46{
46 immap_t *im = (immap_t *)fs_enet_immap; 47 void *dat;
47 void *dir, *dat, *ppar;
48 int adv; 48 int adv;
49 u8 msk; 49 u8 msk;
50 50
51 switch (port) { 51 dat = (void*) mii_bit->offset;
52 case fsiop_porta:
53 dir = &im->im_ioport.iop_padir;
54 dat = &im->im_ioport.iop_padat;
55 ppar = &im->im_ioport.iop_papar;
56 break;
57
58 case fsiop_portb:
59 dir = &im->im_cpm.cp_pbdir;
60 dat = &im->im_cpm.cp_pbdat;
61 ppar = &im->im_cpm.cp_pbpar;
62 break;
63
64 case fsiop_portc:
65 dir = &im->im_ioport.iop_pcdir;
66 dat = &im->im_ioport.iop_pcdat;
67 ppar = &im->im_ioport.iop_pcpar;
68 break;
69
70 case fsiop_portd:
71 dir = &im->im_ioport.iop_pddir;
72 dat = &im->im_ioport.iop_pddat;
73 ppar = &im->im_ioport.iop_pdpar;
74 break;
75
76 case fsiop_porte:
77 dir = &im->im_cpm.cp_pedir;
78 dat = &im->im_cpm.cp_pedat;
79 ppar = &im->im_cpm.cp_pepar;
80 break;
81
82 default:
83 printk(KERN_ERR DRV_MODULE_NAME
84 "Illegal port value %d!\n", port);
85 return -EINVAL;
86 }
87
88 adv = bit >> 3;
89 dir = (char *)dir + adv;
90 dat = (char *)dat + adv;
91 ppar = (char *)ppar + adv;
92
93 msk = 1 << (7 - (bit & 7));
94 if ((in_8(ppar) & msk) != 0) {
95 printk(KERN_ERR DRV_MODULE_NAME
96 "pin %d on port %d is not general purpose!\n", bit, port);
97 return -EINVAL;
98 }
99
100 *dirp = dir;
101 *datp = dat;
102 *mskp = msk;
103
104 return 0;
105}
106#endif
107
108#ifdef CONFIG_8260
109static int bitbang_prep_bit(u8 **dirp, u8 **datp, u8 *mskp, int port, int bit)
110{
111 iop_cpm2_t *io = &((cpm2_map_t *)fs_enet_immap)->im_ioport;
112 void *dir, *dat, *ppar;
113 int adv;
114 u8 msk;
115
116 switch (port) {
117 case fsiop_porta:
118 dir = &io->iop_pdira;
119 dat = &io->iop_pdata;
120 ppar = &io->iop_ppara;
121 break;
122
123 case fsiop_portb:
124 dir = &io->iop_pdirb;
125 dat = &io->iop_pdatb;
126 ppar = &io->iop_pparb;
127 break;
128
129 case fsiop_portc:
130 dir = &io->iop_pdirc;
131 dat = &io->iop_pdatc;
132 ppar = &io->iop_pparc;
133 break;
134
135 case fsiop_portd:
136 dir = &io->iop_pdird;
137 dat = &io->iop_pdatd;
138 ppar = &io->iop_ppard;
139 break;
140
141 default:
142 printk(KERN_ERR DRV_MODULE_NAME
143 "Illegal port value %d!\n", port);
144 return -EINVAL;
145 }
146 52
147 adv = bit >> 3; 53 adv = mii_bit->bit >> 3;
148 dir = (char *)dir + adv;
149 dat = (char *)dat + adv; 54 dat = (char *)dat + adv;
150 ppar = (char *)ppar + adv;
151 55
152 msk = 1 << (7 - (bit & 7)); 56 msk = 1 << (7 - (mii_bit->bit & 7));
153 if ((in_8(ppar) & msk) != 0) {
154 printk(KERN_ERR DRV_MODULE_NAME
155 "pin %d on port %d is not general purpose!\n", bit, port);
156 return -EINVAL;
157 }
158 57
159 *dirp = dir;
160 *datp = dat; 58 *datp = dat;
161 *mskp = msk; 59 *mskp = msk;
162 60
163 return 0; 61 return 0;
164} 62}
165#endif
166 63
167static inline void bb_set(u8 *p, u8 m) 64static inline void bb_set(u8 *p, u8 m)
168{ 65{
@@ -179,44 +76,44 @@ static inline int bb_read(u8 *p, u8 m)
179 return (in_8(p) & m) != 0; 76 return (in_8(p) & m) != 0;
180} 77}
181 78
182static inline void mdio_active(struct fs_enet_mii_bus *bus) 79static inline void mdio_active(struct bb_info *bitbang)
183{ 80{
184 bb_set(bus->bitbang.mdio_dir, bus->bitbang.mdio_msk); 81 bb_set(bitbang->mdio_dir, bitbang->mdio_dir_msk);
185} 82}
186 83
187static inline void mdio_tristate(struct fs_enet_mii_bus *bus) 84static inline void mdio_tristate(struct bb_info *bitbang )
188{ 85{
189 bb_clr(bus->bitbang.mdio_dir, bus->bitbang.mdio_msk); 86 bb_clr(bitbang->mdio_dir, bitbang->mdio_dir_msk);
190} 87}
191 88
192static inline int mdio_read(struct fs_enet_mii_bus *bus) 89static inline int mdio_read(struct bb_info *bitbang )
193{ 90{
194 return bb_read(bus->bitbang.mdio_dat, bus->bitbang.mdio_msk); 91 return bb_read(bitbang->mdio_dat, bitbang->mdio_dat_msk);
195} 92}
196 93
197static inline void mdio(struct fs_enet_mii_bus *bus, int what) 94static inline void mdio(struct bb_info *bitbang , int what)
198{ 95{
199 if (what) 96 if (what)
200 bb_set(bus->bitbang.mdio_dat, bus->bitbang.mdio_msk); 97 bb_set(bitbang->mdio_dat, bitbang->mdio_dat_msk);
201 else 98 else
202 bb_clr(bus->bitbang.mdio_dat, bus->bitbang.mdio_msk); 99 bb_clr(bitbang->mdio_dat, bitbang->mdio_dat_msk);
203} 100}
204 101
205static inline void mdc(struct fs_enet_mii_bus *bus, int what) 102static inline void mdc(struct bb_info *bitbang , int what)
206{ 103{
207 if (what) 104 if (what)
208 bb_set(bus->bitbang.mdc_dat, bus->bitbang.mdc_msk); 105 bb_set(bitbang->mdc_dat, bitbang->mdc_msk);
209 else 106 else
210 bb_clr(bus->bitbang.mdc_dat, bus->bitbang.mdc_msk); 107 bb_clr(bitbang->mdc_dat, bitbang->mdc_msk);
211} 108}
212 109
213static inline void mii_delay(struct fs_enet_mii_bus *bus) 110static inline void mii_delay(struct bb_info *bitbang )
214{ 111{
215 udelay(bus->bus_info->i.bitbang.delay); 112 udelay(bitbang->delay);
216} 113}
217 114
218/* Utility to send the preamble, address, and register (common to read and write). */ 115/* Utility to send the preamble, address, and register (common to read and write). */
219static void bitbang_pre(struct fs_enet_mii_bus *bus, int read, u8 addr, u8 reg) 116static void bitbang_pre(struct bb_info *bitbang , int read, u8 addr, u8 reg)
220{ 117{
221 int j; 118 int j;
222 119
@@ -228,177 +125,284 @@ static void bitbang_pre(struct fs_enet_mii_bus *bus, int read, u8 addr, u8 reg)
228 * but it is safer and will be much more robust. 125 * but it is safer and will be much more robust.
229 */ 126 */
230 127
231 mdio_active(bus); 128 mdio_active(bitbang);
232 mdio(bus, 1); 129 mdio(bitbang, 1);
233 for (j = 0; j < 32; j++) { 130 for (j = 0; j < 32; j++) {
234 mdc(bus, 0); 131 mdc(bitbang, 0);
235 mii_delay(bus); 132 mii_delay(bitbang);
236 mdc(bus, 1); 133 mdc(bitbang, 1);
237 mii_delay(bus); 134 mii_delay(bitbang);
238 } 135 }
239 136
 240 /* send the start bit (01) and the read opcode (10) or write (01) */ 137 /* send the start bit (01) and the read opcode (10) or write (01) */
241 mdc(bus, 0); 138 mdc(bitbang, 0);
242 mdio(bus, 0); 139 mdio(bitbang, 0);
243 mii_delay(bus); 140 mii_delay(bitbang);
244 mdc(bus, 1); 141 mdc(bitbang, 1);
245 mii_delay(bus); 142 mii_delay(bitbang);
246 mdc(bus, 0); 143 mdc(bitbang, 0);
247 mdio(bus, 1); 144 mdio(bitbang, 1);
248 mii_delay(bus); 145 mii_delay(bitbang);
249 mdc(bus, 1); 146 mdc(bitbang, 1);
250 mii_delay(bus); 147 mii_delay(bitbang);
251 mdc(bus, 0); 148 mdc(bitbang, 0);
252 mdio(bus, read); 149 mdio(bitbang, read);
253 mii_delay(bus); 150 mii_delay(bitbang);
254 mdc(bus, 1); 151 mdc(bitbang, 1);
255 mii_delay(bus); 152 mii_delay(bitbang);
256 mdc(bus, 0); 153 mdc(bitbang, 0);
257 mdio(bus, !read); 154 mdio(bitbang, !read);
258 mii_delay(bus); 155 mii_delay(bitbang);
259 mdc(bus, 1); 156 mdc(bitbang, 1);
260 mii_delay(bus); 157 mii_delay(bitbang);
261 158
262 /* send the PHY address */ 159 /* send the PHY address */
263 for (j = 0; j < 5; j++) { 160 for (j = 0; j < 5; j++) {
264 mdc(bus, 0); 161 mdc(bitbang, 0);
265 mdio(bus, (addr & 0x10) != 0); 162 mdio(bitbang, (addr & 0x10) != 0);
266 mii_delay(bus); 163 mii_delay(bitbang);
267 mdc(bus, 1); 164 mdc(bitbang, 1);
268 mii_delay(bus); 165 mii_delay(bitbang);
269 addr <<= 1; 166 addr <<= 1;
270 } 167 }
271 168
272 /* send the register address */ 169 /* send the register address */
273 for (j = 0; j < 5; j++) { 170 for (j = 0; j < 5; j++) {
274 mdc(bus, 0); 171 mdc(bitbang, 0);
275 mdio(bus, (reg & 0x10) != 0); 172 mdio(bitbang, (reg & 0x10) != 0);
276 mii_delay(bus); 173 mii_delay(bitbang);
277 mdc(bus, 1); 174 mdc(bitbang, 1);
278 mii_delay(bus); 175 mii_delay(bitbang);
279 reg <<= 1; 176 reg <<= 1;
280 } 177 }
281} 178}
282 179
283static int mii_read(struct fs_enet_mii_bus *bus, int phy_id, int location) 180static int fs_enet_mii_bb_read(struct mii_bus *bus , int phy_id, int location)
284{ 181{
285 u16 rdreg; 182 u16 rdreg;
286 int ret, j; 183 int ret, j;
287 u8 addr = phy_id & 0xff; 184 u8 addr = phy_id & 0xff;
288 u8 reg = location & 0xff; 185 u8 reg = location & 0xff;
186 struct bb_info* bitbang = bus->priv;
289 187
290 bitbang_pre(bus, 1, addr, reg); 188 bitbang_pre(bitbang, 1, addr, reg);
291 189
292 /* tri-state our MDIO I/O pin so we can read */ 190 /* tri-state our MDIO I/O pin so we can read */
293 mdc(bus, 0); 191 mdc(bitbang, 0);
294 mdio_tristate(bus); 192 mdio_tristate(bitbang);
295 mii_delay(bus); 193 mii_delay(bitbang);
296 mdc(bus, 1); 194 mdc(bitbang, 1);
297 mii_delay(bus); 195 mii_delay(bitbang);
298 196
299 /* check the turnaround bit: the PHY should be driving it to zero */ 197 /* check the turnaround bit: the PHY should be driving it to zero */
300 if (mdio_read(bus) != 0) { 198 if (mdio_read(bitbang) != 0) {
301 /* PHY didn't drive TA low */ 199 /* PHY didn't drive TA low */
302 for (j = 0; j < 32; j++) { 200 for (j = 0; j < 32; j++) {
303 mdc(bus, 0); 201 mdc(bitbang, 0);
304 mii_delay(bus); 202 mii_delay(bitbang);
305 mdc(bus, 1); 203 mdc(bitbang, 1);
306 mii_delay(bus); 204 mii_delay(bitbang);
307 } 205 }
308 ret = -1; 206 ret = -1;
309 goto out; 207 goto out;
310 } 208 }
311 209
312 mdc(bus, 0); 210 mdc(bitbang, 0);
313 mii_delay(bus); 211 mii_delay(bitbang);
314 212
315 /* read 16 bits of register data, MSB first */ 213 /* read 16 bits of register data, MSB first */
316 rdreg = 0; 214 rdreg = 0;
317 for (j = 0; j < 16; j++) { 215 for (j = 0; j < 16; j++) {
318 mdc(bus, 1); 216 mdc(bitbang, 1);
319 mii_delay(bus); 217 mii_delay(bitbang);
320 rdreg <<= 1; 218 rdreg <<= 1;
321 rdreg |= mdio_read(bus); 219 rdreg |= mdio_read(bitbang);
322 mdc(bus, 0); 220 mdc(bitbang, 0);
323 mii_delay(bus); 221 mii_delay(bitbang);
324 } 222 }
325 223
326 mdc(bus, 1); 224 mdc(bitbang, 1);
327 mii_delay(bus); 225 mii_delay(bitbang);
328 mdc(bus, 0); 226 mdc(bitbang, 0);
329 mii_delay(bus); 227 mii_delay(bitbang);
330 mdc(bus, 1); 228 mdc(bitbang, 1);
331 mii_delay(bus); 229 mii_delay(bitbang);
332 230
333 ret = rdreg; 231 ret = rdreg;
334out: 232out:
335 return ret; 233 return ret;
336} 234}
337 235
338static void mii_write(struct fs_enet_mii_bus *bus, int phy_id, int location, int val) 236static int fs_enet_mii_bb_write(struct mii_bus *bus, int phy_id, int location, u16 val)
339{ 237{
340 int j; 238 int j;
239 struct bb_info* bitbang = bus->priv;
240
341 u8 addr = phy_id & 0xff; 241 u8 addr = phy_id & 0xff;
342 u8 reg = location & 0xff; 242 u8 reg = location & 0xff;
343 u16 value = val & 0xffff; 243 u16 value = val & 0xffff;
344 244
345 bitbang_pre(bus, 0, addr, reg); 245 bitbang_pre(bitbang, 0, addr, reg);
346 246
347 /* send the turnaround (10) */ 247 /* send the turnaround (10) */
348 mdc(bus, 0); 248 mdc(bitbang, 0);
349 mdio(bus, 1); 249 mdio(bitbang, 1);
350 mii_delay(bus); 250 mii_delay(bitbang);
351 mdc(bus, 1); 251 mdc(bitbang, 1);
352 mii_delay(bus); 252 mii_delay(bitbang);
353 mdc(bus, 0); 253 mdc(bitbang, 0);
354 mdio(bus, 0); 254 mdio(bitbang, 0);
355 mii_delay(bus); 255 mii_delay(bitbang);
356 mdc(bus, 1); 256 mdc(bitbang, 1);
357 mii_delay(bus); 257 mii_delay(bitbang);
358 258
359 /* write 16 bits of register data, MSB first */ 259 /* write 16 bits of register data, MSB first */
360 for (j = 0; j < 16; j++) { 260 for (j = 0; j < 16; j++) {
361 mdc(bus, 0); 261 mdc(bitbang, 0);
362 mdio(bus, (value & 0x8000) != 0); 262 mdio(bitbang, (value & 0x8000) != 0);
363 mii_delay(bus); 263 mii_delay(bitbang);
364 mdc(bus, 1); 264 mdc(bitbang, 1);
365 mii_delay(bus); 265 mii_delay(bitbang);
366 value <<= 1; 266 value <<= 1;
367 } 267 }
368 268
369 /* 269 /*
370 * Tri-state the MDIO line. 270 * Tri-state the MDIO line.
371 */ 271 */
372 mdio_tristate(bus); 272 mdio_tristate(bitbang);
373 mdc(bus, 0); 273 mdc(bitbang, 0);
374 mii_delay(bus); 274 mii_delay(bitbang);
375 mdc(bus, 1); 275 mdc(bitbang, 1);
376 mii_delay(bus); 276 mii_delay(bitbang);
277 return 0;
377} 278}
378 279
379int fs_mii_bitbang_init(struct fs_enet_mii_bus *bus) 280static int fs_enet_mii_bb_reset(struct mii_bus *bus)
281{
282 /*nothing here - dunno how to reset it*/
283 return 0;
284}
285
286static int fs_mii_bitbang_init(struct bb_info *bitbang, struct fs_mii_bb_platform_info* fmpi)
380{ 287{
381 const struct fs_mii_bus_info *bi = bus->bus_info;
382 int r; 288 int r;
383 289
384 r = bitbang_prep_bit(&bus->bitbang.mdio_dir, 290 bitbang->delay = fmpi->delay;
385 &bus->bitbang.mdio_dat, 291
386 &bus->bitbang.mdio_msk, 292 r = bitbang_prep_bit(&bitbang->mdio_dir,
387 bi->i.bitbang.mdio_port, 293 &bitbang->mdio_dir_msk,
388 bi->i.bitbang.mdio_bit); 294 &fmpi->mdio_dir);
389 if (r != 0) 295 if (r != 0)
390 return r; 296 return r;
391 297
392 r = bitbang_prep_bit(&bus->bitbang.mdc_dir, 298 r = bitbang_prep_bit(&bitbang->mdio_dat,
393 &bus->bitbang.mdc_dat, 299 &bitbang->mdio_dat_msk,
394 &bus->bitbang.mdc_msk, 300 &fmpi->mdio_dat);
395 bi->i.bitbang.mdc_port,
396 bi->i.bitbang.mdc_bit);
397 if (r != 0) 301 if (r != 0)
398 return r; 302 return r;
399 303
400 bus->mii_read = mii_read; 304 r = bitbang_prep_bit(&bitbang->mdc_dat,
401 bus->mii_write = mii_write; 305 &bitbang->mdc_msk,
306 &fmpi->mdc_dat);
307 if (r != 0)
308 return r;
402 309
403 return 0; 310 return 0;
404} 311}
312
313
314static int __devinit fs_enet_mdio_probe(struct device *dev)
315{
316 struct platform_device *pdev = to_platform_device(dev);
317 struct fs_mii_bb_platform_info *pdata;
318 struct mii_bus *new_bus;
319 struct bb_info *bitbang;
320 int err = 0;
321
322 if (NULL == dev)
323 return -EINVAL;
324
325 new_bus = kzalloc(sizeof(struct mii_bus), GFP_KERNEL);
326
327 if (NULL == new_bus)
328 return -ENOMEM;
329
330 bitbang = kzalloc(sizeof(struct bb_info), GFP_KERNEL);
331
332 if (NULL == bitbang)
333 return -ENOMEM;
334
 335	new_bus->name = "BB MII Bus";
 336	new_bus->read = &fs_enet_mii_bb_read;
 337	new_bus->write = &fs_enet_mii_bb_write;
 338	new_bus->reset = &fs_enet_mii_bb_reset;
339 new_bus->id = pdev->id;
340
341 new_bus->phy_mask = ~0x9;
342 pdata = (struct fs_mii_bb_platform_info *)pdev->dev.platform_data;
343
344 if (NULL == pdata) {
 345		printk(KERN_ERR "fs_enet BB mdio %d: Missing platform data!\n", pdev->id);
346 return -ENODEV;
347 }
348
349 /*set up workspace*/
350 fs_mii_bitbang_init(bitbang, pdata);
351
352 new_bus->priv = bitbang;
353
354 new_bus->irq = pdata->irq;
355
356 new_bus->dev = dev;
357 dev_set_drvdata(dev, new_bus);
358
359 err = mdiobus_register(new_bus);
360
361 if (0 != err) {
362 printk (KERN_ERR "%s: Cannot register as MDIO bus\n",
363 new_bus->name);
364 goto bus_register_fail;
365 }
366
367 return 0;
368
369bus_register_fail:
370 kfree(bitbang);
371 kfree(new_bus);
372
373 return err;
374}
375
376
377static int fs_enet_mdio_remove(struct device *dev)
378{
379 struct mii_bus *bus = dev_get_drvdata(dev);
380
381 mdiobus_unregister(bus);
382
383 dev_set_drvdata(dev, NULL);
384
 385	kfree(bus->priv);
386 bus->priv = NULL;
387 kfree(bus);
388
389 return 0;
390}
391
392static struct device_driver fs_enet_bb_mdio_driver = {
393 .name = "fsl-bb-mdio",
394 .bus = &platform_bus_type,
395 .probe = fs_enet_mdio_probe,
396 .remove = fs_enet_mdio_remove,
397};
398
399int fs_enet_mdio_bb_init(void)
400{
401 return driver_register(&fs_enet_bb_mdio_driver);
402}
403
404void fs_enet_mdio_bb_exit(void)
405{
406 driver_unregister(&fs_enet_bb_mdio_driver);
407}
408
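The routines above end up clocking a standard IEEE 802.3 clause-22 management frame onto the MDC/MDIO pins. As a rough, stand-alone illustration of that frame, not of the driver's API: the pin helpers below are hypothetical stand-ins for the driver's bb_set()/bb_clr()/bb_read() plus mii_delay().

/* Dummy pin backends so the sketch compiles on its own; a real board would
 * poke the port registers that bitbang_prep_bit() resolves above. */
static int mdc_pin, mdio_pin, mdio_is_output = 1;

static void set_mdc(int level)   { mdc_pin = level; }
static void set_mdio(int level)  { mdio_is_output = 1; mdio_pin = level; }
static void release_mdio(void)   { mdio_is_output = 0; }
static int  read_mdio(void)      { return mdio_pin; }

static void clock_bit(int bit)
{
	set_mdc(0);
	set_mdio(bit);
	set_mdc(1);		/* the real driver inserts mii_delay() around each edge */
}

/* Clause-22 read frame: 32 preamble 1s, start 01, opcode 10 for read
 * (a write uses 01), 5 PHY address bits, 5 register bits, turnaround,
 * then 16 data bits, everything MSB first. */
static unsigned short mdio_read_frame(unsigned char phy_addr, unsigned char reg_addr)
{
	unsigned short val = 0;
	int i;

	for (i = 0; i < 32; i++)
		clock_bit(1);			/* preamble */
	clock_bit(0); clock_bit(1);		/* start */
	clock_bit(1); clock_bit(0);		/* read opcode */
	for (i = 4; i >= 0; i--)
		clock_bit((phy_addr >> i) & 1);
	for (i = 4; i >= 0; i--)
		clock_bit((reg_addr >> i) & 1);

	set_mdc(0);
	release_mdio();				/* turnaround: the PHY drives TA low */
	set_mdc(1);

	for (i = 0; i < 16; i++) {
		set_mdc(0);
		set_mdc(1);
		val = (val << 1) | read_mdio();
	}
	return val;
}

A write frame differs only in the opcode and in keeping MDIO driven through the turnaround, which is exactly what fs_enet_mii_bb_write() above does.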
diff --git a/drivers/net/fs_enet/mii-fec.c b/drivers/net/fs_enet/mii-fec.c
new file mode 100644
index 00000000000..1328e10caa3
--- /dev/null
+++ b/drivers/net/fs_enet/mii-fec.c
@@ -0,0 +1,243 @@
1/*
2 * Combined Ethernet driver for Motorola MPC8xx and MPC82xx.
3 *
4 * Copyright (c) 2003 Intracom S.A.
5 * by Pantelis Antoniou <panto@intracom.gr>
6 *
7 * 2005 (c) MontaVista Software, Inc.
8 * Vitaly Bordug <vbordug@ru.mvista.com>
9 *
10 * This file is licensed under the terms of the GNU General Public License
11 * version 2. This program is licensed "as is" without any warranty of any
12 * kind, whether express or implied.
13 */
14
15
16#include <linux/config.h>
17#include <linux/module.h>
18#include <linux/types.h>
19#include <linux/kernel.h>
20#include <linux/sched.h>
21#include <linux/string.h>
22#include <linux/ptrace.h>
23#include <linux/errno.h>
24#include <linux/ioport.h>
25#include <linux/slab.h>
26#include <linux/interrupt.h>
27#include <linux/pci.h>
28#include <linux/init.h>
29#include <linux/delay.h>
30#include <linux/netdevice.h>
31#include <linux/etherdevice.h>
32#include <linux/skbuff.h>
33#include <linux/spinlock.h>
34#include <linux/mii.h>
35#include <linux/ethtool.h>
36#include <linux/bitops.h>
37#include <linux/platform_device.h>
38
39#include <asm/pgtable.h>
40#include <asm/irq.h>
41#include <asm/uaccess.h>
42
43#include "fs_enet.h"
44#include "fec.h"
45
46/* Make MII read/write commands for the FEC.
47*/
48#define mk_mii_read(REG) (0x60020000 | ((REG & 0x1f) << 18))
49#define mk_mii_write(REG, VAL) (0x50020000 | ((REG & 0x1f) << 18) | (VAL & 0xffff))
50#define mk_mii_end 0
51
52#define FEC_MII_LOOPS 10000
53
54static int match_has_phy (struct device *dev, void* data)
55{
56 struct platform_device* pdev = container_of(dev, struct platform_device, dev);
57 struct fs_platform_info* fpi;
58 if(strcmp(pdev->name, (char*)data))
59 {
60 return 0;
61 }
62
63 fpi = pdev->dev.platform_data;
64 if((fpi)&&(fpi->has_phy))
65 return 1;
66 return 0;
67}
68
69static int fs_mii_fec_init(struct fec_info* fec, struct fs_mii_fec_platform_info *fmpi)
70{
71 struct resource *r;
72 fec_t *fecp;
73 char* name = "fsl-cpm-fec";
74
75 /* we need fec in order to be useful */
76 struct platform_device *fec_pdev =
77 container_of(bus_find_device(&platform_bus_type, NULL, name, match_has_phy),
78 struct platform_device, dev);
79
80 if(fec_pdev == NULL) {
 81		printk(KERN_ERR "Unable to find PHY for %s\n", name);
82 return -ENODEV;
83 }
84
85 r = platform_get_resource_byname(fec_pdev, IORESOURCE_MEM, "regs");
86
87 fec->fecp = fecp = (fec_t*)ioremap(r->start,sizeof(fec_t));
88 fec->mii_speed = fmpi->mii_speed;
89
90 setbits32(&fecp->fec_r_cntrl, FEC_RCNTRL_MII_MODE); /* MII enable */
91 setbits32(&fecp->fec_ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_ETHER_EN);
92 out_be32(&fecp->fec_ievent, FEC_ENET_MII);
93 out_be32(&fecp->fec_mii_speed, fec->mii_speed);
94
95 return 0;
96}
97
98static int fs_enet_fec_mii_read(struct mii_bus *bus , int phy_id, int location)
99{
100 struct fec_info* fec = bus->priv;
101 fec_t *fecp = fec->fecp;
102 int i, ret = -1;
103
104 if ((in_be32(&fecp->fec_r_cntrl) & FEC_RCNTRL_MII_MODE) == 0)
105 BUG();
106
107 /* Add PHY address to register command. */
108 out_be32(&fecp->fec_mii_data, (phy_id << 23) | mk_mii_read(location));
109
110 for (i = 0; i < FEC_MII_LOOPS; i++)
111 if ((in_be32(&fecp->fec_ievent) & FEC_ENET_MII) != 0)
112 break;
113
114 if (i < FEC_MII_LOOPS) {
115 out_be32(&fecp->fec_ievent, FEC_ENET_MII);
116 ret = in_be32(&fecp->fec_mii_data) & 0xffff;
117 }
118
119 return ret;
120
121}
122
123static int fs_enet_fec_mii_write(struct mii_bus *bus, int phy_id, int location, u16 val)
124{
125 struct fec_info* fec = bus->priv;
126 fec_t *fecp = fec->fecp;
127 int i;
128
129 /* this must never happen */
130 if ((in_be32(&fecp->fec_r_cntrl) & FEC_RCNTRL_MII_MODE) == 0)
131 BUG();
132
133 /* Add PHY address to register command. */
134 out_be32(&fecp->fec_mii_data, (phy_id << 23) | mk_mii_write(location, val));
135
136 for (i = 0; i < FEC_MII_LOOPS; i++)
137 if ((in_be32(&fecp->fec_ievent) & FEC_ENET_MII) != 0)
138 break;
139
140 if (i < FEC_MII_LOOPS)
141 out_be32(&fecp->fec_ievent, FEC_ENET_MII);
142
143 return 0;
144
145}
146
147static int fs_enet_fec_mii_reset(struct mii_bus *bus)
148{
149 /* nothing here - for now */
150 return 0;
151}
152
153static int __devinit fs_enet_fec_mdio_probe(struct device *dev)
154{
155 struct platform_device *pdev = to_platform_device(dev);
156 struct fs_mii_fec_platform_info *pdata;
157 struct mii_bus *new_bus;
158 struct fec_info *fec;
159 int err = 0;
160 if (NULL == dev)
161 return -EINVAL;
162 new_bus = kzalloc(sizeof(struct mii_bus), GFP_KERNEL);
163
164 if (NULL == new_bus)
165 return -ENOMEM;
166
167 fec = kzalloc(sizeof(struct fec_info), GFP_KERNEL);
168
169 if (NULL == fec)
170 return -ENOMEM;
171
 172	new_bus->name = "FEC MII Bus";
 173	new_bus->read = &fs_enet_fec_mii_read;
 174	new_bus->write = &fs_enet_fec_mii_write;
 175	new_bus->reset = &fs_enet_fec_mii_reset;
176 new_bus->id = pdev->id;
177
178 pdata = (struct fs_mii_fec_platform_info *)pdev->dev.platform_data;
179
180 if (NULL == pdata) {
181 printk(KERN_ERR "fs_enet FEC mdio %d: Missing platform data!\n", pdev->id);
182 return -ENODEV;
183 }
184
185 /*set up workspace*/
186
187 fs_mii_fec_init(fec, pdata);
188 new_bus->priv = fec;
189
190 new_bus->irq = pdata->irq;
191
192 new_bus->dev = dev;
193 dev_set_drvdata(dev, new_bus);
194
195 err = mdiobus_register(new_bus);
196
197 if (0 != err) {
198 printk (KERN_ERR "%s: Cannot register as MDIO bus\n",
199 new_bus->name);
200 goto bus_register_fail;
201 }
202
203 return 0;
204
205bus_register_fail:
206 kfree(new_bus);
207
208 return err;
209}
210
211
212static int fs_enet_fec_mdio_remove(struct device *dev)
213{
214 struct mii_bus *bus = dev_get_drvdata(dev);
215
216 mdiobus_unregister(bus);
217
218 dev_set_drvdata(dev, NULL);
219 kfree(bus->priv);
220
221 bus->priv = NULL;
222 kfree(bus);
223
224 return 0;
225}
226
227static struct device_driver fs_enet_fec_mdio_driver = {
228 .name = "fsl-cpm-fec-mdio",
229 .bus = &platform_bus_type,
230 .probe = fs_enet_fec_mdio_probe,
231 .remove = fs_enet_fec_mdio_remove,
232};
233
234int fs_enet_mdio_fec_init(void)
235{
236 return driver_register(&fs_enet_fec_mdio_driver);
237}
238
239void fs_enet_mdio_fec_exit(void)
240{
241 driver_unregister(&fs_enet_fec_mdio_driver);
242}
243
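The mk_mii_read()/mk_mii_write() macros above pack the whole management frame (start, opcode, turnaround and register number) into the single 32-bit word written to fec_mii_data, with the PHY address OR'ed in at bit 23 by the callers. A small worked example, derived purely from the macros shown; the field-layout comments are my reading of the constants:

#include <stdio.h>

/* Same macros as in mii-fec.c above. */
#define mk_mii_read(REG)	(0x60020000 | ((REG & 0x1f) << 18))
#define mk_mii_write(REG, VAL)	(0x50020000 | ((REG & 0x1f) << 18) | (VAL & 0xffff))

int main(void)
{
	unsigned int phy_id = 1;
	unsigned int reg = 1;		/* MII_BMSR */

	/* 0x60020000 = start (01) and read opcode (10) in bits 31:28, plus the
	 * turnaround (10) in bits 17:16; the register number lands in bits
	 * 22:18 and the PHY address in bits 27:23 (added by the caller). */
	unsigned int cmd = (phy_id << 23) | mk_mii_read(reg);

	printf("fec_mii_data word for PHY %u, reg %u: 0x%08x\n",
	       phy_id, reg, cmd);	/* prints 0x60860000 */
	return 0;
}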
diff --git a/drivers/net/fs_enet/mii-fixed.c b/drivers/net/fs_enet/mii-fixed.c
deleted file mode 100644
index ae4a9c3bb39..00000000000
--- a/drivers/net/fs_enet/mii-fixed.c
+++ /dev/null
@@ -1,91 +0,0 @@
1/*
2 * Combined Ethernet driver for Motorola MPC8xx and MPC82xx.
3 *
4 * Copyright (c) 2003 Intracom S.A.
5 * by Pantelis Antoniou <panto@intracom.gr>
6 *
7 * 2005 (c) MontaVista Software, Inc.
8 * Vitaly Bordug <vbordug@ru.mvista.com>
9 *
10 * This file is licensed under the terms of the GNU General Public License
11 * version 2. This program is licensed "as is" without any warranty of any
12 * kind, whether express or implied.
13 */
14
15
16#include <linux/module.h>
17#include <linux/types.h>
18#include <linux/kernel.h>
19#include <linux/sched.h>
20#include <linux/string.h>
21#include <linux/ptrace.h>
22#include <linux/errno.h>
23#include <linux/ioport.h>
24#include <linux/slab.h>
25#include <linux/interrupt.h>
26#include <linux/pci.h>
27#include <linux/init.h>
28#include <linux/delay.h>
29#include <linux/netdevice.h>
30#include <linux/etherdevice.h>
31#include <linux/skbuff.h>
32#include <linux/spinlock.h>
33#include <linux/mii.h>
34#include <linux/ethtool.h>
35#include <linux/bitops.h>
36
37#include <asm/pgtable.h>
38#include <asm/irq.h>
39#include <asm/uaccess.h>
40
41#include "fs_enet.h"
42
43static const u16 mii_regs[7] = {
44 0x3100,
45 0x786d,
46 0x0fff,
47 0x0fff,
48 0x01e1,
49 0x45e1,
50 0x0003,
51};
52
53static int mii_read(struct fs_enet_mii_bus *bus, int phy_id, int location)
54{
55 int ret = 0;
56
57 if ((unsigned int)location >= ARRAY_SIZE(mii_regs))
58 return -1;
59
60 if (location != 5)
61 ret = mii_regs[location];
62 else
63 ret = bus->fixed.lpa;
64
65 return ret;
66}
67
68static void mii_write(struct fs_enet_mii_bus *bus, int phy_id, int location, int val)
69{
70 /* do nothing */
71}
72
73int fs_mii_fixed_init(struct fs_enet_mii_bus *bus)
74{
75 const struct fs_mii_bus_info *bi = bus->bus_info;
76
77 bus->fixed.lpa = 0x45e1; /* default 100Mb, full duplex */
78
79 /* if speed is fixed at 10Mb, remove 100Mb modes */
80 if (bi->i.fixed.speed == 10)
81 bus->fixed.lpa &= ~LPA_100;
82
83 /* if duplex is half, remove full duplex modes */
84 if (bi->i.fixed.duplex == 0)
85 bus->fixed.lpa &= ~LPA_DUPLEX;
86
87 bus->mii_read = mii_read;
88 bus->mii_write = mii_write;
89
90 return 0;
91}
diff --git a/drivers/net/lance.c b/drivers/net/lance.c
index c1c3452c90c..5b4dbfe5fb7 100644
--- a/drivers/net/lance.c
+++ b/drivers/net/lance.c
@@ -326,7 +326,7 @@ MODULE_PARM_DESC(dma, "LANCE/PCnet ISA DMA channel (ignored for some devices)");
326MODULE_PARM_DESC(irq, "LANCE/PCnet IRQ number (ignored for some devices)"); 326MODULE_PARM_DESC(irq, "LANCE/PCnet IRQ number (ignored for some devices)");
327MODULE_PARM_DESC(lance_debug, "LANCE/PCnet debug level (0-7)"); 327MODULE_PARM_DESC(lance_debug, "LANCE/PCnet debug level (0-7)");
328 328
329int init_module(void) 329int __init init_module(void)
330{ 330{
331 struct net_device *dev; 331 struct net_device *dev;
332 int this_dev, found = 0; 332 int this_dev, found = 0;
diff --git a/drivers/net/lne390.c b/drivers/net/lne390.c
index 646e89fc356..c0ec7f6abcb 100644
--- a/drivers/net/lne390.c
+++ b/drivers/net/lne390.c
@@ -406,7 +406,7 @@ MODULE_PARM_DESC(mem, "memory base address(es)");
406MODULE_DESCRIPTION("Mylex LNE390A/B EISA Ethernet driver"); 406MODULE_DESCRIPTION("Mylex LNE390A/B EISA Ethernet driver");
407MODULE_LICENSE("GPL"); 407MODULE_LICENSE("GPL");
408 408
409int init_module(void) 409int __init init_module(void)
410{ 410{
411 struct net_device *dev; 411 struct net_device *dev;
412 int this_dev, found = 0; 412 int this_dev, found = 0;
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index 06440a86bae..9bdd43ab357 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -2425,7 +2425,7 @@ static int myri10ge_resume(struct pci_dev *pdev)
2425 } 2425 }
2426 2426
2427 myri10ge_reset(mgp); 2427 myri10ge_reset(mgp);
2428 myri10ge_dummy_rdma(mgp, mgp->tx.boundary != 4096); 2428 myri10ge_dummy_rdma(mgp, 1);
2429 2429
2430 /* Save configuration space to be restored if the 2430 /* Save configuration space to be restored if the
2431 * nic resets due to a parity error */ 2431 * nic resets due to a parity error */
diff --git a/drivers/net/ni52.c b/drivers/net/ni52.c
index fa854c8fde7..4d52ecf8af5 100644
--- a/drivers/net/ni52.c
+++ b/drivers/net/ni52.c
@@ -1323,7 +1323,7 @@ MODULE_PARM_DESC(irq, "NI5210 IRQ number,required");
1323MODULE_PARM_DESC(memstart, "NI5210 memory base address,required"); 1323MODULE_PARM_DESC(memstart, "NI5210 memory base address,required");
1324MODULE_PARM_DESC(memend, "NI5210 memory end address,required"); 1324MODULE_PARM_DESC(memend, "NI5210 memory end address,required");
1325 1325
1326int init_module(void) 1326int __init init_module(void)
1327{ 1327{
1328 if(io <= 0x0 || !memend || !memstart || irq < 2) { 1328 if(io <= 0x0 || !memend || !memstart || irq < 2) {
1329 printk("ni52: Autoprobing not allowed for modules.\nni52: Set symbols 'io' 'irq' 'memstart' and 'memend'\n"); 1329 printk("ni52: Autoprobing not allowed for modules.\nni52: Set symbols 'io' 'irq' 'memstart' and 'memend'\n");
diff --git a/drivers/net/ni65.c b/drivers/net/ni65.c
index bb42ff21848..810cc572f5f 100644
--- a/drivers/net/ni65.c
+++ b/drivers/net/ni65.c
@@ -1253,7 +1253,7 @@ MODULE_PARM_DESC(irq, "ni6510 IRQ number (ignored for some cards)");
1253MODULE_PARM_DESC(io, "ni6510 I/O base address"); 1253MODULE_PARM_DESC(io, "ni6510 I/O base address");
1254MODULE_PARM_DESC(dma, "ni6510 ISA DMA channel (ignored for some cards)"); 1254MODULE_PARM_DESC(dma, "ni6510 ISA DMA channel (ignored for some cards)");
1255 1255
1256int init_module(void) 1256int __init init_module(void)
1257{ 1257{
1258 dev_ni65 = ni65_probe(-1); 1258 dev_ni65 = ni65_probe(-1);
1259 return IS_ERR(dev_ni65) ? PTR_ERR(dev_ni65) : 0; 1259 return IS_ERR(dev_ni65) ? PTR_ERR(dev_ni65) : 0;
diff --git a/drivers/net/pcmcia/xirc2ps_cs.c b/drivers/net/pcmcia/xirc2ps_cs.c
index 9bae77ce131..4122bb46f5f 100644
--- a/drivers/net/pcmcia/xirc2ps_cs.c
+++ b/drivers/net/pcmcia/xirc2ps_cs.c
@@ -345,6 +345,7 @@ typedef struct local_info_t {
345 void __iomem *dingo_ccr; /* only used for CEM56 cards */ 345 void __iomem *dingo_ccr; /* only used for CEM56 cards */
346 unsigned last_ptr_value; /* last packets transmitted value */ 346 unsigned last_ptr_value; /* last packets transmitted value */
347 const char *manf_str; 347 const char *manf_str;
348 struct work_struct tx_timeout_task;
348} local_info_t; 349} local_info_t;
349 350
350/**************** 351/****************
@@ -352,6 +353,7 @@ typedef struct local_info_t {
352 */ 353 */
353static int do_start_xmit(struct sk_buff *skb, struct net_device *dev); 354static int do_start_xmit(struct sk_buff *skb, struct net_device *dev);
354static void do_tx_timeout(struct net_device *dev); 355static void do_tx_timeout(struct net_device *dev);
356static void xirc2ps_tx_timeout_task(void *data);
355static struct net_device_stats *do_get_stats(struct net_device *dev); 357static struct net_device_stats *do_get_stats(struct net_device *dev);
356static void set_addresses(struct net_device *dev); 358static void set_addresses(struct net_device *dev);
357static void set_multicast_list(struct net_device *dev); 359static void set_multicast_list(struct net_device *dev);
@@ -589,6 +591,7 @@ xirc2ps_probe(struct pcmcia_device *link)
589#ifdef HAVE_TX_TIMEOUT 591#ifdef HAVE_TX_TIMEOUT
590 dev->tx_timeout = do_tx_timeout; 592 dev->tx_timeout = do_tx_timeout;
591 dev->watchdog_timeo = TX_TIMEOUT; 593 dev->watchdog_timeo = TX_TIMEOUT;
594 INIT_WORK(&local->tx_timeout_task, xirc2ps_tx_timeout_task, dev);
592#endif 595#endif
593 596
594 return xirc2ps_config(link); 597 return xirc2ps_config(link);
@@ -1341,17 +1344,24 @@ xirc2ps_interrupt(int irq, void *dev_id, struct pt_regs *regs)
1341/*====================================================================*/ 1344/*====================================================================*/
1342 1345
1343static void 1346static void
1344do_tx_timeout(struct net_device *dev) 1347xirc2ps_tx_timeout_task(void *data)
1345{ 1348{
1346 local_info_t *lp = netdev_priv(dev); 1349 struct net_device *dev = data;
1347 printk(KERN_NOTICE "%s: transmit timed out\n", dev->name);
1348 lp->stats.tx_errors++;
1349 /* reset the card */ 1350 /* reset the card */
1350 do_reset(dev,1); 1351 do_reset(dev,1);
1351 dev->trans_start = jiffies; 1352 dev->trans_start = jiffies;
1352 netif_wake_queue(dev); 1353 netif_wake_queue(dev);
1353} 1354}
1354 1355
1356static void
1357do_tx_timeout(struct net_device *dev)
1358{
1359 local_info_t *lp = netdev_priv(dev);
1360 lp->stats.tx_errors++;
1361 printk(KERN_NOTICE "%s: transmit timed out\n", dev->name);
1362 schedule_work(&lp->tx_timeout_task);
1363}
1364
1355static int 1365static int
1356do_start_xmit(struct sk_buff *skb, struct net_device *dev) 1366do_start_xmit(struct sk_buff *skb, struct net_device *dev)
1357{ 1367{
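The xirc2ps change above moves the actual card reset out of the tx_timeout watchdog, which runs in atomic context, into a work item that runs in process context where do_reset() is free to sleep. A minimal sketch of that pattern, using the same three-argument INIT_WORK() this tree provides; all names here are placeholders, not part of the driver:

#include <linux/netdevice.h>
#include <linux/workqueue.h>

struct my_priv {
	struct work_struct tx_timeout_task;
};

/* Runs in process context via keventd, so it may sleep while resetting. */
static void my_tx_timeout_task(void *data)
{
	struct net_device *dev = data;

	/* reset the hardware here, then let traffic flow again */
	netif_wake_queue(dev);
}

/* Watchdog callback, atomic context: just schedule the real work. */
static void my_tx_timeout(struct net_device *dev)
{
	struct my_priv *priv = netdev_priv(dev);

	schedule_work(&priv->tx_timeout_task);
}

/* Wiring, done once at probe time; the netdev is assumed to have been
 * allocated with sizeof(struct my_priv) of private space. */
static void my_setup_timeout(struct net_device *dev)
{
	struct my_priv *priv = netdev_priv(dev);

	INIT_WORK(&priv->tx_timeout_task, my_tx_timeout_task, dev);
	dev->tx_timeout = my_tx_timeout;
	dev->watchdog_timeo = 5 * HZ;
}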
diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c
index 4daafe30335..d50bcb89dd2 100644
--- a/drivers/net/pcnet32.c
+++ b/drivers/net/pcnet32.c
@@ -202,6 +202,8 @@ static int homepna[MAX_UNITS];
202#define CSR15 15 202#define CSR15 15
203#define PCNET32_MC_FILTER 8 203#define PCNET32_MC_FILTER 8
204 204
205#define PCNET32_79C970A 0x2621
206
205/* The PCNET32 Rx and Tx ring descriptors. */ 207/* The PCNET32 Rx and Tx ring descriptors. */
206struct pcnet32_rx_head { 208struct pcnet32_rx_head {
207 u32 base; 209 u32 base;
@@ -289,6 +291,7 @@ struct pcnet32_private {
289 291
290 /* each bit indicates an available PHY */ 292 /* each bit indicates an available PHY */
291 u32 phymask; 293 u32 phymask;
294 unsigned short chip_version; /* which variant this is */
292}; 295};
293 296
294static int pcnet32_probe_pci(struct pci_dev *, const struct pci_device_id *); 297static int pcnet32_probe_pci(struct pci_dev *, const struct pci_device_id *);
@@ -724,9 +727,11 @@ static u32 pcnet32_get_link(struct net_device *dev)
724 spin_lock_irqsave(&lp->lock, flags); 727 spin_lock_irqsave(&lp->lock, flags);
725 if (lp->mii) { 728 if (lp->mii) {
726 r = mii_link_ok(&lp->mii_if); 729 r = mii_link_ok(&lp->mii_if);
727 } else { 730 } else if (lp->chip_version >= PCNET32_79C970A) {
728 ulong ioaddr = dev->base_addr; /* card base I/O address */ 731 ulong ioaddr = dev->base_addr; /* card base I/O address */
729 r = (lp->a.read_bcr(ioaddr, 4) != 0xc0); 732 r = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
733 } else { /* can not detect link on really old chips */
734 r = 1;
730 } 735 }
731 spin_unlock_irqrestore(&lp->lock, flags); 736 spin_unlock_irqrestore(&lp->lock, flags);
732 737
@@ -1091,6 +1096,10 @@ static int pcnet32_suspend(struct net_device *dev, unsigned long *flags,
1091 ulong ioaddr = dev->base_addr; 1096 ulong ioaddr = dev->base_addr;
1092 int ticks; 1097 int ticks;
1093 1098
1099 /* really old chips have to be stopped. */
1100 if (lp->chip_version < PCNET32_79C970A)
1101 return 0;
1102
1094 /* set SUSPEND (SPND) - CSR5 bit 0 */ 1103 /* set SUSPEND (SPND) - CSR5 bit 0 */
1095 csr5 = a->read_csr(ioaddr, CSR5); 1104 csr5 = a->read_csr(ioaddr, CSR5);
1096 a->write_csr(ioaddr, CSR5, csr5 | CSR5_SUSPEND); 1105 a->write_csr(ioaddr, CSR5, csr5 | CSR5_SUSPEND);
@@ -1529,6 +1538,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
1529 lp->mii_if.reg_num_mask = 0x1f; 1538 lp->mii_if.reg_num_mask = 0x1f;
1530 lp->dxsuflo = dxsuflo; 1539 lp->dxsuflo = dxsuflo;
1531 lp->mii = mii; 1540 lp->mii = mii;
1541 lp->chip_version = chip_version;
1532 lp->msg_enable = pcnet32_debug; 1542 lp->msg_enable = pcnet32_debug;
1533 if ((cards_found >= MAX_UNITS) 1543 if ((cards_found >= MAX_UNITS)
1534 || (options[cards_found] > sizeof(options_mapping))) 1544 || (options[cards_found] > sizeof(options_mapping)))
@@ -1839,10 +1849,7 @@ static int pcnet32_open(struct net_device *dev)
1839 val |= 2; 1849 val |= 2;
1840 } else if (lp->options & PCNET32_PORT_ASEL) { 1850 } else if (lp->options & PCNET32_PORT_ASEL) {
1841 /* workaround of xSeries250, turn on for 79C975 only */ 1851 /* workaround of xSeries250, turn on for 79C975 only */
1842 i = ((lp->a.read_csr(ioaddr, 88) | 1852 if (lp->chip_version == 0x2627)
1843 (lp->a.
1844 read_csr(ioaddr, 89) << 16)) >> 12) & 0xffff;
1845 if (i == 0x2627)
1846 val |= 3; 1853 val |= 3;
1847 } 1854 }
1848 lp->a.write_bcr(ioaddr, 9, val); 1855 lp->a.write_bcr(ioaddr, 9, val);
@@ -1986,9 +1993,11 @@ static int pcnet32_open(struct net_device *dev)
1986 1993
1987 netif_start_queue(dev); 1994 netif_start_queue(dev);
1988 1995
1989 /* Print the link status and start the watchdog */ 1996 if (lp->chip_version >= PCNET32_79C970A) {
1990 pcnet32_check_media(dev, 1); 1997 /* Print the link status and start the watchdog */
1991 mod_timer(&(lp->watchdog_timer), PCNET32_WATCHDOG_TIMEOUT); 1998 pcnet32_check_media(dev, 1);
1999 mod_timer(&(lp->watchdog_timer), PCNET32_WATCHDOG_TIMEOUT);
2000 }
1992 2001
1993 i = 0; 2002 i = 0;
1994 while (i++ < 100) 2003 while (i++ < 100)
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 2ba6d3a40e2..b79ec0d7480 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -56,5 +56,22 @@ config SMSC_PHY
56 ---help--- 56 ---help---
57 Currently supports the LAN83C185 PHY 57 Currently supports the LAN83C185 PHY
58 58
59config FIXED_PHY
60 tristate "Drivers for PHY emulation on fixed speed/link"
61 depends on PHYLIB
62 ---help---
 63	  Adds a driver to the PHY layer for boards that have no real PHY attached
 64	  but still need to control speed/link in software. The relevant MII
 65	  speed/duplex parameters can then be handled in a user-specified function.
66 Currently tested with mpc866ads.
67
68config FIXED_MII_10_FDX
69 bool "Emulation for 10M Fdx fixed PHY behavior"
70 depends on FIXED_PHY
71
72config FIXED_MII_100_FDX
73 bool "Emulation for 100M Fdx fixed PHY behavior"
74 depends on FIXED_PHY
75
59endmenu 76endmenu
60 77
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index a00e6194252..320f8323123 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -10,3 +10,4 @@ obj-$(CONFIG_LXT_PHY) += lxt.o
10obj-$(CONFIG_QSEMI_PHY) += qsemi.o 10obj-$(CONFIG_QSEMI_PHY) += qsemi.o
11obj-$(CONFIG_SMSC_PHY) += smsc.o 11obj-$(CONFIG_SMSC_PHY) += smsc.o
12obj-$(CONFIG_VITESSE_PHY) += vitesse.o 12obj-$(CONFIG_VITESSE_PHY) += vitesse.o
13obj-$(CONFIG_FIXED_PHY) += fixed.o
diff --git a/drivers/net/phy/fixed.c b/drivers/net/phy/fixed.c
new file mode 100644
index 00000000000..341036df471
--- /dev/null
+++ b/drivers/net/phy/fixed.c
@@ -0,0 +1,358 @@
1/*
2 * drivers/net/phy/fixed.c
3 *
4 * Driver for fixed PHYs, when transceiver is able to operate in one fixed mode.
5 *
6 * Author: Vitaly Bordug
7 *
8 * Copyright (c) 2006 MontaVista Software, Inc.
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the
12 * Free Software Foundation; either version 2 of the License, or (at your
13 * option) any later version.
14 *
15 */
16#include <linux/config.h>
17#include <linux/kernel.h>
18#include <linux/sched.h>
19#include <linux/string.h>
20#include <linux/errno.h>
21#include <linux/unistd.h>
22#include <linux/slab.h>
23#include <linux/interrupt.h>
24#include <linux/init.h>
25#include <linux/delay.h>
26#include <linux/netdevice.h>
27#include <linux/etherdevice.h>
28#include <linux/skbuff.h>
29#include <linux/spinlock.h>
30#include <linux/mm.h>
31#include <linux/module.h>
32#include <linux/mii.h>
33#include <linux/ethtool.h>
34#include <linux/phy.h>
35
36#include <asm/io.h>
37#include <asm/irq.h>
38#include <asm/uaccess.h>
39
40#define MII_REGS_NUM 7
41
42/*
 43 The idea is to emulate normal PHY behaviour by responding with
 44 pre-defined values to MII register reads, so that the read_status hook
 45 can obtain all the needed info.
46*/
47
48struct fixed_phy_status {
49 u8 link;
50 u16 speed;
51 u8 duplex;
52};
53
54/*-----------------------------------------------------------------------------
 55 * Private information holder for mii_bus
56 *-----------------------------------------------------------------------------*/
57struct fixed_info {
58 u16 *regs;
59 u8 regs_num;
60 struct fixed_phy_status phy_status;
61 struct phy_device *phydev; /* pointer to the container */
62 /* link & speed cb */
63 int(*link_update)(struct net_device*, struct fixed_phy_status*);
64
65};
66
67/*-----------------------------------------------------------------------------
 68 * If something non-standard needs to be done with link/speed, the network
 69 * driver can register a function to implement it.
 70 * Useful for PHYs that need to be software-driven.
71 *-----------------------------------------------------------------------------*/
72int fixed_mdio_set_link_update(struct phy_device* phydev,
73 int(*link_update)(struct net_device*, struct fixed_phy_status*))
74{
75 struct fixed_info *fixed;
76
77 if(link_update == NULL)
78 return -EINVAL;
79
80 if(phydev) {
81 if(phydev->bus) {
82 fixed = phydev->bus->priv;
83 fixed->link_update = link_update;
84 return 0;
85 }
86 }
87 return -EINVAL;
88}
89EXPORT_SYMBOL(fixed_mdio_set_link_update);
90
91/*-----------------------------------------------------------------------------
92 * This is used for updating internal mii regs from the status
93 *-----------------------------------------------------------------------------*/
94static int fixed_mdio_update_regs(struct fixed_info *fixed)
95{
96 u16 *regs = fixed->regs;
97 u16 bmsr = 0;
98 u16 bmcr = 0;
99
100 if(!regs) {
101 printk(KERN_ERR "%s: regs not set up", __FUNCTION__);
102 return -EINVAL;
103 }
104
105 if(fixed->phy_status.link)
106 bmsr |= BMSR_LSTATUS;
107
108 if(fixed->phy_status.duplex) {
109 bmcr |= BMCR_FULLDPLX;
110
111 switch ( fixed->phy_status.speed ) {
112 case 100:
113 bmsr |= BMSR_100FULL;
114 bmcr |= BMCR_SPEED100;
115 break;
116
117 case 10:
118 bmsr |= BMSR_10FULL;
119 break;
120 }
121 } else {
122 switch ( fixed->phy_status.speed ) {
123 case 100:
124 bmsr |= BMSR_100HALF;
125 bmcr |= BMCR_SPEED100;
126 break;
127
128 case 10:
 129			bmsr |= BMSR_10HALF;
130 break;
131 }
132 }
133
134 regs[MII_BMCR] = bmcr;
135 regs[MII_BMSR] = bmsr | 0x800; /*we are always capable of 10 hdx*/
136
137 return 0;
138}
139
140static int fixed_mii_read(struct mii_bus *bus, int phy_id, int location)
141{
142 struct fixed_info *fixed = bus->priv;
143
144 /* if user has registered link update callback, use it */
145 if(fixed->phydev)
146 if(fixed->phydev->attached_dev) {
147 if(fixed->link_update) {
148 fixed->link_update(fixed->phydev->attached_dev,
149 &fixed->phy_status);
150 fixed_mdio_update_regs(fixed);
151 }
152 }
153
154 if ((unsigned int)location >= fixed->regs_num)
155 return -1;
156 return fixed->regs[location];
157}
158
159static int fixed_mii_write(struct mii_bus *bus, int phy_id, int location, u16 val)
160{
161 /* do nothing for now*/
162 return 0;
163}
164
165static int fixed_mii_reset(struct mii_bus *bus)
166{
167 /*nothing here - no way/need to reset it*/
168 return 0;
169}
170
171static int fixed_config_aneg(struct phy_device *phydev)
172{
173 /* :TODO:03/13/2006 09:45:37 PM::
174 The full autoneg funcionality can be emulated,
175 but no need to have anything here for now
176 */
177 return 0;
178}
179
180/*-----------------------------------------------------------------------------
181 * the manual bind will do the magic - with phy_id_mask == 0
182 * match will never return true...
183 *-----------------------------------------------------------------------------*/
184static struct phy_driver fixed_mdio_driver = {
185 .name = "Fixed PHY",
186 .features = PHY_BASIC_FEATURES,
187 .config_aneg = fixed_config_aneg,
188 .read_status = genphy_read_status,
189 .driver = { .owner = THIS_MODULE,},
190};
191
192/*-----------------------------------------------------------------------------
 193 * This function creates the mii_bus and fixed_info, binds the fixed PHY
 194 * driver and registers it all on the mdio_bus_type.
195 * speed is either 10 or 100, duplex is boolean.
196 * number is used to create multiple fixed PHYs, so that several devices can
197 * utilize them simultaneously.
198 *-----------------------------------------------------------------------------*/
199static int fixed_mdio_register_device(int number, int speed, int duplex)
200{
201 struct mii_bus *new_bus;
202 struct fixed_info *fixed;
203 struct phy_device *phydev;
204 int err = 0;
205
206 struct device* dev = kzalloc(sizeof(struct device), GFP_KERNEL);
207
208 if (NULL == dev)
209 return -ENOMEM;
210
211 new_bus = kzalloc(sizeof(struct mii_bus), GFP_KERNEL);
212
213 if (NULL == new_bus) {
214 kfree(dev);
215 return -ENOMEM;
216 }
217 fixed = kzalloc(sizeof(struct fixed_info), GFP_KERNEL);
218
219 if (NULL == fixed) {
220 kfree(dev);
221 kfree(new_bus);
222 return -ENOMEM;
223 }
224
225 fixed->regs = kzalloc(MII_REGS_NUM*sizeof(int), GFP_KERNEL);
226 fixed->regs_num = MII_REGS_NUM;
227 fixed->phy_status.speed = speed;
228 fixed->phy_status.duplex = duplex;
229 fixed->phy_status.link = 1;
230
 231	new_bus->name = "Fixed MII Bus";
 232	new_bus->read = &fixed_mii_read;
 233	new_bus->write = &fixed_mii_write;
 234	new_bus->reset = &fixed_mii_reset;
235
236 /*set up workspace*/
237 fixed_mdio_update_regs(fixed);
238 new_bus->priv = fixed;
239
240 new_bus->dev = dev;
241 dev_set_drvdata(dev, new_bus);
242
243 /* create phy_device and register it on the mdio bus */
244 phydev = phy_device_create(new_bus, 0, 0);
245
246 /*
 247	   Put the phydev pointer into the fixed_info so that the bus read/write code
 248	   can reach the attached netdev. This is only needed when a user-specified
 249	   link_update callback is in use.
250 */
251 fixed->phydev = phydev;
252
253 if(NULL == phydev) {
254 err = -ENOMEM;
255 goto device_create_fail;
256 }
257
258 phydev->irq = -1;
259 phydev->dev.bus = &mdio_bus_type;
260
261 if(number)
262 snprintf(phydev->dev.bus_id, BUS_ID_SIZE,
263 "fixed_%d@%d:%d", number, speed, duplex);
264 else
265 snprintf(phydev->dev.bus_id, BUS_ID_SIZE,
266 "fixed@%d:%d", speed, duplex);
267 phydev->bus = new_bus;
268
269 err = device_register(&phydev->dev);
270 if(err) {
271 printk(KERN_ERR "Phy %s failed to register\n",
272 phydev->dev.bus_id);
273 goto bus_register_fail;
274 }
275
276 /*
277 the mdio bus has phy_id match... In order not to do it
278 artificially, we are binding the driver here by hand;
279 it will be the same for all the fixed phys anyway.
280 */
281 down_write(&phydev->dev.bus->subsys.rwsem);
282
283 phydev->dev.driver = &fixed_mdio_driver.driver;
284
285 err = phydev->dev.driver->probe(&phydev->dev);
286 if(err < 0) {
287 printk(KERN_ERR "Phy %s: problems with fixed driver\n",phydev->dev.bus_id);
288 up_write(&phydev->dev.bus->subsys.rwsem);
289 goto probe_fail;
290 }
291
292 device_bind_driver(&phydev->dev);
293 up_write(&phydev->dev.bus->subsys.rwsem);
294
295 return 0;
296
297probe_fail:
298 device_unregister(&phydev->dev);
299bus_register_fail:
300 kfree(phydev);
301device_create_fail:
302 kfree(dev);
303 kfree(new_bus);
304 kfree(fixed);
305
306 return err;
307}
308
309
310MODULE_DESCRIPTION("Fixed PHY device & driver for PAL");
311MODULE_AUTHOR("Vitaly Bordug");
312MODULE_LICENSE("GPL");
313
314static int __init fixed_init(void)
315{
316 int ret;
317 int duplex = 0;
318
319 /* register on the bus... Not expected to be matched with anything there... */
320 phy_driver_register(&fixed_mdio_driver);
321
322 /* So let the fun begin...
 323	   We create several mdio devices here and bind the upper
 324	   driver to them.
 325
 326	   External code can then look up the PHY bus by searching for
 327	   fixed@speed:duplex, e.g. fixed@100:1, to be connected to the
 328	   virtual 100M full-duplex PHY.
 329
 330	   If several virtual PHYs are required, the bus_id takes the form
 331	   fixed_<num>@<speed>:<duplex>, which also makes it possible to define a
 332	   driver-specific link control callback, e.g. when the PHY is completely
 333	   software-driven.
334
335 */
336
337#ifdef CONFIG_FIXED_MII_DUPLEX
338 duplex = 1;
339#endif
340
341#ifdef CONFIG_FIXED_MII_100_FDX
342 fixed_mdio_register_device(0, 100, 1);
343#endif
344
 345#ifdef CONFIG_FIXED_MII_10_FDX
346 fixed_mdio_register_device(0, 10, 1);
347#endif
348 return 0;
349}
350
351static void __exit fixed_exit(void)
352{
353 phy_driver_unregister(&fixed_mdio_driver);
354 /* :WARNING:02/18/2006 04:32:40 AM:: Cleanup all the created stuff */
355}
356
357module_init(fixed_init);
358module_exit(fixed_exit);
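fixed.c above lets the MAC driver override the emulated status on every MII read through fixed_mdio_set_link_update(). A rough sketch of such a callback, assuming struct fixed_phy_status and the fixed_mdio_set_link_update() prototype are made visible through a header (in this patch they live in fixed.c only); the policy shown is purely illustrative:

#include <linux/netdevice.h>
#include <linux/phy.h>

/* Mirrors the definition inside fixed.c above. */
struct fixed_phy_status {
	u8	link;
	u16	speed;
	u8	duplex;
};

extern int fixed_mdio_set_link_update(struct phy_device *phydev,
		int (*link_update)(struct net_device *, struct fixed_phy_status *));

/* Board-specific policy: report 100M full duplex whenever the MAC driver's
 * notion of the carrier is up. */
static int my_fixed_link_update(struct net_device *dev,
				struct fixed_phy_status *status)
{
	status->link = netif_carrier_ok(dev) ? 1 : 0;
	status->speed = 100;
	status->duplex = 1;
	return 0;
}

/* Install the callback once the phy_device has been found. */
static void my_attach_fixed_phy(struct phy_device *phydev)
{
	fixed_mdio_set_link_update(phydev, my_fixed_link_update);
}

The driver would first look up the phy_device by its bus_id, e.g. "fixed@100:1" as described in the fixed_init() comment above, before installing the callback.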
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 1dde390c164..cf6660c93ff 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -159,6 +159,7 @@ struct bus_type mdio_bus_type = {
159 .suspend = mdio_bus_suspend, 159 .suspend = mdio_bus_suspend,
160 .resume = mdio_bus_resume, 160 .resume = mdio_bus_resume,
161}; 161};
162EXPORT_SYMBOL(mdio_bus_type);
162 163
163int __init mdio_bus_init(void) 164int __init mdio_bus_init(void)
164{ 165{
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 1bc1e032c5d..2d1ecfdc80d 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -45,6 +45,35 @@ static struct phy_driver genphy_driver;
45extern int mdio_bus_init(void); 45extern int mdio_bus_init(void);
46extern void mdio_bus_exit(void); 46extern void mdio_bus_exit(void);
47 47
48struct phy_device* phy_device_create(struct mii_bus *bus, int addr, int phy_id)
49{
50 struct phy_device *dev;
51 /* We allocate the device, and initialize the
52 * default values */
53 dev = kcalloc(1, sizeof(*dev), GFP_KERNEL);
54
55 if (NULL == dev)
 56		return ERR_PTR(-ENOMEM);
57
58 dev->speed = 0;
59 dev->duplex = -1;
60 dev->pause = dev->asym_pause = 0;
61 dev->link = 1;
62
63 dev->autoneg = AUTONEG_ENABLE;
64
65 dev->addr = addr;
66 dev->phy_id = phy_id;
67 dev->bus = bus;
68
69 dev->state = PHY_DOWN;
70
71 spin_lock_init(&dev->lock);
72
73 return dev;
74}
75EXPORT_SYMBOL(phy_device_create);
76
48/* get_phy_device 77/* get_phy_device
49 * 78 *
50 * description: Reads the ID registers of the PHY at addr on the 79 * description: Reads the ID registers of the PHY at addr on the
@@ -78,27 +107,7 @@ struct phy_device * get_phy_device(struct mii_bus *bus, int addr)
78 if (0xffffffff == phy_id) 107 if (0xffffffff == phy_id)
79 return NULL; 108 return NULL;
80 109
81 /* Otherwise, we allocate the device, and initialize the 110 dev = phy_device_create(bus, addr, phy_id);
82 * default values */
83 dev = kcalloc(1, sizeof(*dev), GFP_KERNEL);
84
85 if (NULL == dev)
86 return ERR_PTR(-ENOMEM);
87
88 dev->speed = 0;
89 dev->duplex = -1;
90 dev->pause = dev->asym_pause = 0;
91 dev->link = 1;
92
93 dev->autoneg = AUTONEG_ENABLE;
94
95 dev->addr = addr;
96 dev->phy_id = phy_id;
97 dev->bus = bus;
98
99 dev->state = PHY_DOWN;
100
101 spin_lock_init(&dev->lock);
102 111
103 return dev; 112 return dev;
104} 113}
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c
index 0ec6e9d57b9..c872f7c6cce 100644
--- a/drivers/net/ppp_generic.c
+++ b/drivers/net/ppp_generic.c
@@ -192,7 +192,7 @@ struct cardmap {
192 void *ptr[CARDMAP_WIDTH]; 192 void *ptr[CARDMAP_WIDTH];
193}; 193};
194static void *cardmap_get(struct cardmap *map, unsigned int nr); 194static void *cardmap_get(struct cardmap *map, unsigned int nr);
195static void cardmap_set(struct cardmap **map, unsigned int nr, void *ptr); 195static int cardmap_set(struct cardmap **map, unsigned int nr, void *ptr);
196static unsigned int cardmap_find_first_free(struct cardmap *map); 196static unsigned int cardmap_find_first_free(struct cardmap *map);
197static void cardmap_destroy(struct cardmap **map); 197static void cardmap_destroy(struct cardmap **map);
198 198
@@ -1995,10 +1995,9 @@ ppp_register_channel(struct ppp_channel *chan)
1995{ 1995{
1996 struct channel *pch; 1996 struct channel *pch;
1997 1997
1998 pch = kmalloc(sizeof(struct channel), GFP_KERNEL); 1998 pch = kzalloc(sizeof(struct channel), GFP_KERNEL);
1999 if (pch == 0) 1999 if (pch == 0)
2000 return -ENOMEM; 2000 return -ENOMEM;
2001 memset(pch, 0, sizeof(struct channel));
2002 pch->ppp = NULL; 2001 pch->ppp = NULL;
2003 pch->chan = chan; 2002 pch->chan = chan;
2004 chan->ppp = pch; 2003 chan->ppp = pch;
@@ -2408,13 +2407,12 @@ ppp_create_interface(int unit, int *retp)
2408 int ret = -ENOMEM; 2407 int ret = -ENOMEM;
2409 int i; 2408 int i;
2410 2409
2411 ppp = kmalloc(sizeof(struct ppp), GFP_KERNEL); 2410 ppp = kzalloc(sizeof(struct ppp), GFP_KERNEL);
2412 if (!ppp) 2411 if (!ppp)
2413 goto out; 2412 goto out;
2414 dev = alloc_netdev(0, "", ppp_setup); 2413 dev = alloc_netdev(0, "", ppp_setup);
2415 if (!dev) 2414 if (!dev)
2416 goto out1; 2415 goto out1;
2417 memset(ppp, 0, sizeof(struct ppp));
2418 2416
2419 ppp->mru = PPP_MRU; 2417 ppp->mru = PPP_MRU;
2420 init_ppp_file(&ppp->file, INTERFACE); 2418 init_ppp_file(&ppp->file, INTERFACE);
@@ -2454,11 +2452,16 @@ ppp_create_interface(int unit, int *retp)
2454 } 2452 }
2455 2453
2456 atomic_inc(&ppp_unit_count); 2454 atomic_inc(&ppp_unit_count);
2457 cardmap_set(&all_ppp_units, unit, ppp); 2455 ret = cardmap_set(&all_ppp_units, unit, ppp);
2456 if (ret != 0)
2457 goto out3;
2458
2458 mutex_unlock(&all_ppp_mutex); 2459 mutex_unlock(&all_ppp_mutex);
2459 *retp = 0; 2460 *retp = 0;
2460 return ppp; 2461 return ppp;
2461 2462
2463out3:
2464 atomic_dec(&ppp_unit_count);
2462out2: 2465out2:
2463 mutex_unlock(&all_ppp_mutex); 2466 mutex_unlock(&all_ppp_mutex);
2464 free_netdev(dev); 2467 free_netdev(dev);
@@ -2695,7 +2698,7 @@ static void *cardmap_get(struct cardmap *map, unsigned int nr)
2695 return NULL; 2698 return NULL;
2696} 2699}
2697 2700
2698static void cardmap_set(struct cardmap **pmap, unsigned int nr, void *ptr) 2701static int cardmap_set(struct cardmap **pmap, unsigned int nr, void *ptr)
2699{ 2702{
2700 struct cardmap *p; 2703 struct cardmap *p;
2701 int i; 2704 int i;
@@ -2704,8 +2707,9 @@ static void cardmap_set(struct cardmap **pmap, unsigned int nr, void *ptr)
2704 if (p == NULL || (nr >> p->shift) >= CARDMAP_WIDTH) { 2707 if (p == NULL || (nr >> p->shift) >= CARDMAP_WIDTH) {
2705 do { 2708 do {
2706 /* need a new top level */ 2709 /* need a new top level */
2707 struct cardmap *np = kmalloc(sizeof(*np), GFP_KERNEL); 2710 struct cardmap *np = kzalloc(sizeof(*np), GFP_KERNEL);
2708 memset(np, 0, sizeof(*np)); 2711 if (!np)
2712 goto enomem;
2709 np->ptr[0] = p; 2713 np->ptr[0] = p;
2710 if (p != NULL) { 2714 if (p != NULL) {
2711 np->shift = p->shift + CARDMAP_ORDER; 2715 np->shift = p->shift + CARDMAP_ORDER;
@@ -2719,8 +2723,9 @@ static void cardmap_set(struct cardmap **pmap, unsigned int nr, void *ptr)
2719 while (p->shift > 0) { 2723 while (p->shift > 0) {
2720 i = (nr >> p->shift) & CARDMAP_MASK; 2724 i = (nr >> p->shift) & CARDMAP_MASK;
2721 if (p->ptr[i] == NULL) { 2725 if (p->ptr[i] == NULL) {
2722 struct cardmap *np = kmalloc(sizeof(*np), GFP_KERNEL); 2726 struct cardmap *np = kzalloc(sizeof(*np), GFP_KERNEL);
2723 memset(np, 0, sizeof(*np)); 2727 if (!np)
2728 goto enomem;
2724 np->shift = p->shift - CARDMAP_ORDER; 2729 np->shift = p->shift - CARDMAP_ORDER;
2725 np->parent = p; 2730 np->parent = p;
2726 p->ptr[i] = np; 2731 p->ptr[i] = np;
@@ -2735,6 +2740,9 @@ static void cardmap_set(struct cardmap **pmap, unsigned int nr, void *ptr)
2735 set_bit(i, &p->inuse); 2740 set_bit(i, &p->inuse);
2736 else 2741 else
2737 clear_bit(i, &p->inuse); 2742 clear_bit(i, &p->inuse);
2743 return 0;
2744 enomem:
2745 return -ENOMEM;
2738} 2746}
2739 2747
2740static unsigned int cardmap_find_first_free(struct cardmap *map) 2748static unsigned int cardmap_find_first_free(struct cardmap *map)
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index 132ed32bce1..e72e0e09906 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -71,6 +71,7 @@
71#include <asm/uaccess.h> 71#include <asm/uaccess.h>
72#include <asm/io.h> 72#include <asm/io.h>
73#include <asm/div64.h> 73#include <asm/div64.h>
74#include <asm/irq.h>
74 75
75/* local include */ 76/* local include */
76#include "s2io.h" 77#include "s2io.h"
diff --git a/drivers/net/seeq8005.c b/drivers/net/seeq8005.c
index efd0f235020..01392bca022 100644
--- a/drivers/net/seeq8005.c
+++ b/drivers/net/seeq8005.c
@@ -742,7 +742,7 @@ module_param(irq, int, 0);
742MODULE_PARM_DESC(io, "SEEQ 8005 I/O base address"); 742MODULE_PARM_DESC(io, "SEEQ 8005 I/O base address");
743MODULE_PARM_DESC(irq, "SEEQ 8005 IRQ number"); 743MODULE_PARM_DESC(irq, "SEEQ 8005 IRQ number");
744 744
745int init_module(void) 745int __init init_module(void)
746{ 746{
747 dev_seeq = seeq8005_probe(-1); 747 dev_seeq = seeq8005_probe(-1);
748 if (IS_ERR(dev_seeq)) 748 if (IS_ERR(dev_seeq))
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index 7de9a07b2ac..ad878dfddef 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -2211,6 +2211,7 @@ static int skge_up(struct net_device *dev)
2211 skge_write8(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_START | CSR_IRQ_CL_F); 2211 skge_write8(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_START | CSR_IRQ_CL_F);
2212 skge_led(skge, LED_MODE_ON); 2212 skge_led(skge, LED_MODE_ON);
2213 2213
2214 netif_poll_enable(dev);
2214 return 0; 2215 return 0;
2215 2216
2216 free_rx_ring: 2217 free_rx_ring:
@@ -2279,6 +2280,7 @@ static int skge_down(struct net_device *dev)
2279 2280
2280 skge_led(skge, LED_MODE_OFF); 2281 skge_led(skge, LED_MODE_OFF);
2281 2282
2283 netif_poll_disable(dev);
2282 skge_tx_clean(skge); 2284 skge_tx_clean(skge);
2283 skge_rx_clean(skge); 2285 skge_rx_clean(skge);
2284 2286
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index de91609ca11..933e87f1cc6 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -233,6 +233,8 @@ static void sky2_set_power_state(struct sky2_hw *hw, pci_power_t state)
233 if (hw->ports > 1) 233 if (hw->ports > 1)
234 reg1 |= PCI_Y2_PHY2_COMA; 234 reg1 |= PCI_Y2_PHY2_COMA;
235 } 235 }
236 sky2_pci_write32(hw, PCI_DEV_REG1, reg1);
237 udelay(100);
236 238
237 if (hw->chip_id == CHIP_ID_YUKON_EC_U) { 239 if (hw->chip_id == CHIP_ID_YUKON_EC_U) {
238 sky2_pci_write32(hw, PCI_DEV_REG3, 0); 240 sky2_pci_write32(hw, PCI_DEV_REG3, 0);
@@ -242,9 +244,6 @@ static void sky2_set_power_state(struct sky2_hw *hw, pci_power_t state)
242 sky2_pci_write32(hw, PCI_DEV_REG5, 0); 244 sky2_pci_write32(hw, PCI_DEV_REG5, 0);
243 } 245 }
244 246
245 sky2_pci_write32(hw, PCI_DEV_REG1, reg1);
246 udelay(100);
247
248 break; 247 break;
249 248
250 case PCI_D3hot: 249 case PCI_D3hot:
diff --git a/drivers/net/smc911x.c b/drivers/net/smc911x.c
index d37bd860b33..0b15290df27 100644
--- a/drivers/net/smc911x.c
+++ b/drivers/net/smc911x.c
@@ -1092,6 +1092,7 @@ static irqreturn_t smc911x_interrupt(int irq, void *dev_id, struct pt_regs *regs
1092 /* Spurious interrupt check */ 1092 /* Spurious interrupt check */
1093 if ((SMC_GET_IRQ_CFG() & (INT_CFG_IRQ_INT_ | INT_CFG_IRQ_EN_)) != 1093 if ((SMC_GET_IRQ_CFG() & (INT_CFG_IRQ_INT_ | INT_CFG_IRQ_EN_)) !=
1094 (INT_CFG_IRQ_INT_ | INT_CFG_IRQ_EN_)) { 1094 (INT_CFG_IRQ_INT_ | INT_CFG_IRQ_EN_)) {
1095 spin_unlock_irqrestore(&lp->lock, flags);
1095 return IRQ_NONE; 1096 return IRQ_NONE;
1096 } 1097 }
1097 1098
diff --git a/drivers/net/smc91x.c b/drivers/net/smc91x.c
index 3d8dcb6c875..cf62373b808 100644
--- a/drivers/net/smc91x.c
+++ b/drivers/net/smc91x.c
@@ -321,12 +321,12 @@ static void smc_reset(struct net_device *dev)
321 DBG(2, "%s: %s\n", dev->name, __FUNCTION__); 321 DBG(2, "%s: %s\n", dev->name, __FUNCTION__);
322 322
323 /* Disable all interrupts, block TX tasklet */ 323 /* Disable all interrupts, block TX tasklet */
324 spin_lock(&lp->lock); 324 spin_lock_irq(&lp->lock);
325 SMC_SELECT_BANK(2); 325 SMC_SELECT_BANK(2);
326 SMC_SET_INT_MASK(0); 326 SMC_SET_INT_MASK(0);
327 pending_skb = lp->pending_tx_skb; 327 pending_skb = lp->pending_tx_skb;
328 lp->pending_tx_skb = NULL; 328 lp->pending_tx_skb = NULL;
329 spin_unlock(&lp->lock); 329 spin_unlock_irq(&lp->lock);
330 330
331 /* free any pending tx skb */ 331 /* free any pending tx skb */
332 if (pending_skb) { 332 if (pending_skb) {
@@ -448,12 +448,12 @@ static void smc_shutdown(struct net_device *dev)
448 DBG(2, "%s: %s\n", CARDNAME, __FUNCTION__); 448 DBG(2, "%s: %s\n", CARDNAME, __FUNCTION__);
449 449
450 /* no more interrupts for me */ 450 /* no more interrupts for me */
451 spin_lock(&lp->lock); 451 spin_lock_irq(&lp->lock);
452 SMC_SELECT_BANK(2); 452 SMC_SELECT_BANK(2);
453 SMC_SET_INT_MASK(0); 453 SMC_SET_INT_MASK(0);
454 pending_skb = lp->pending_tx_skb; 454 pending_skb = lp->pending_tx_skb;
455 lp->pending_tx_skb = NULL; 455 lp->pending_tx_skb = NULL;
456 spin_unlock(&lp->lock); 456 spin_unlock_irq(&lp->lock);
457 if (pending_skb) 457 if (pending_skb)
458 dev_kfree_skb(pending_skb); 458 dev_kfree_skb(pending_skb);
459 459
diff --git a/drivers/net/smc91x.h b/drivers/net/smc91x.h
index 4ec4b4d23ae..7aa7fbac822 100644
--- a/drivers/net/smc91x.h
+++ b/drivers/net/smc91x.h
@@ -136,14 +136,9 @@
136#define SMC_CAN_USE_32BIT 0 136#define SMC_CAN_USE_32BIT 0
137#define SMC_IO_SHIFT 0 137#define SMC_IO_SHIFT 0
138#define SMC_NOWAIT 1 138#define SMC_NOWAIT 1
139#define SMC_USE_PXA_DMA 1
140 139
141#define SMC_inb(a, r) readb((a) + (r))
142#define SMC_inw(a, r) readw((a) + (r)) 140#define SMC_inw(a, r) readw((a) + (r))
143#define SMC_inl(a, r) readl((a) + (r))
144#define SMC_outb(v, a, r) writeb(v, (a) + (r))
145#define SMC_outw(v, a, r) writew(v, (a) + (r)) 141#define SMC_outw(v, a, r) writew(v, (a) + (r))
146#define SMC_outl(v, a, r) writel(v, (a) + (r))
147#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l) 142#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l)
148#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l) 143#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l)
149 144
@@ -189,16 +184,10 @@ SMC_outw(u16 val, void __iomem *ioaddr, int reg)
189#define SMC_IO_SHIFT 0 184#define SMC_IO_SHIFT 0
190#define SMC_NOWAIT 1 185#define SMC_NOWAIT 1
191 186
192#define SMC_inb(a, r) readb((a) + (r))
193#define SMC_outb(v, a, r) writeb(v, (a) + (r))
194#define SMC_inw(a, r) readw((a) + (r)) 187#define SMC_inw(a, r) readw((a) + (r))
195#define SMC_outw(v, a, r) writew(v, (a) + (r)) 188#define SMC_outw(v, a, r) writew(v, (a) + (r))
196#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l) 189#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l)
197#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l) 190#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l)
198#define SMC_inl(a, r) readl((a) + (r))
199#define SMC_outl(v, a, r) writel(v, (a) + (r))
200#define SMC_insl(a, r, p, l) readsl((a) + (r), p, l)
201#define SMC_outsl(a, r, p, l) writesl((a) + (r), p, l)
202 191
203#include <asm/mach-types.h> 192#include <asm/mach-types.h>
204#include <asm/arch/cpu.h> 193#include <asm/arch/cpu.h>
@@ -372,6 +361,24 @@ static inline void LPD7_SMC_outsw (unsigned char* a, int r,
372 361
373#define SMC_IRQ_FLAGS (0) 362#define SMC_IRQ_FLAGS (0)
374 363
364#elif defined(CONFIG_ARCH_VERSATILE)
365
366#define SMC_CAN_USE_8BIT 1
367#define SMC_CAN_USE_16BIT 1
368#define SMC_CAN_USE_32BIT 1
369#define SMC_NOWAIT 1
370
371#define SMC_inb(a, r) readb((a) + (r))
372#define SMC_inw(a, r) readw((a) + (r))
373#define SMC_inl(a, r) readl((a) + (r))
374#define SMC_outb(v, a, r) writeb(v, (a) + (r))
375#define SMC_outw(v, a, r) writew(v, (a) + (r))
376#define SMC_outl(v, a, r) writel(v, (a) + (r))
377#define SMC_insl(a, r, p, l) readsl((a) + (r), p, l)
378#define SMC_outsl(a, r, p, l) writesl((a) + (r), p, l)
379
380#define SMC_IRQ_FLAGS (0)
381
375#else 382#else
376 383
377#define SMC_CAN_USE_8BIT 1 384#define SMC_CAN_USE_8BIT 1
diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c
index ec1a8e2d458..d64e718afbd 100644
--- a/drivers/net/spider_net.c
+++ b/drivers/net/spider_net.c
@@ -1611,13 +1611,12 @@ spider_net_open(struct net_device *netdev)
1611 int result; 1611 int result;
1612 1612
1613 result = -ENOMEM; 1613 result = -ENOMEM;
1614 if (spider_net_init_chain(card, &card->tx_chain, 1614 if (spider_net_init_chain(card, &card->tx_chain, card->descr,
1615 card->descr, 1615 PCI_DMA_TODEVICE, card->tx_desc))
1616 PCI_DMA_TODEVICE, tx_descriptors))
1617 goto alloc_tx_failed; 1616 goto alloc_tx_failed;
1618 if (spider_net_init_chain(card, &card->rx_chain, 1617 if (spider_net_init_chain(card, &card->rx_chain,
1619 card->descr + tx_descriptors, 1618 card->descr + card->rx_desc,
1620 PCI_DMA_FROMDEVICE, rx_descriptors)) 1619 PCI_DMA_FROMDEVICE, card->rx_desc))
1621 goto alloc_rx_failed; 1620 goto alloc_rx_failed;
1622 1621
1623 /* allocate rx skbs */ 1622 /* allocate rx skbs */
@@ -2005,6 +2004,9 @@ spider_net_setup_netdev(struct spider_net_card *card)
2005 2004
2006 card->options.rx_csum = SPIDER_NET_RX_CSUM_DEFAULT; 2005 card->options.rx_csum = SPIDER_NET_RX_CSUM_DEFAULT;
2007 2006
2007 card->tx_desc = tx_descriptors;
2008 card->rx_desc = rx_descriptors;
2009
2008 spider_net_setup_netdev_ops(netdev); 2010 spider_net_setup_netdev_ops(netdev);
2009 2011
2010 netdev->features = NETIF_F_HW_CSUM | NETIF_F_LLTX; 2012 netdev->features = NETIF_F_HW_CSUM | NETIF_F_LLTX;
diff --git a/drivers/net/spider_net.h b/drivers/net/spider_net.h
index f6dcf180ae3..30407cdf089 100644
--- a/drivers/net/spider_net.h
+++ b/drivers/net/spider_net.h
@@ -440,6 +440,9 @@ struct spider_net_card {
440 /* for ethtool */ 440 /* for ethtool */
441 int msg_enable; 441 int msg_enable;
442 442
443 int rx_desc;
444 int tx_desc;
445
443 struct spider_net_descr descr[0]; 446 struct spider_net_descr descr[0];
444}; 447};
445 448
diff --git a/drivers/net/spider_net_ethtool.c b/drivers/net/spider_net_ethtool.c
index a5bb0b7633a..02209222b8c 100644
--- a/drivers/net/spider_net_ethtool.c
+++ b/drivers/net/spider_net_ethtool.c
@@ -130,6 +130,18 @@ spider_net_ethtool_set_tx_csum(struct net_device *netdev, uint32_t data)
130 return 0; 130 return 0;
131} 131}
132 132
133static void
134spider_net_ethtool_get_ringparam(struct net_device *netdev,
135 struct ethtool_ringparam *ering)
136{
137 struct spider_net_card *card = netdev->priv;
138
139 ering->tx_max_pending = SPIDER_NET_TX_DESCRIPTORS_MAX;
140 ering->tx_pending = card->tx_desc;
141 ering->rx_max_pending = SPIDER_NET_RX_DESCRIPTORS_MAX;
142 ering->rx_pending = card->rx_desc;
143}
144
133struct ethtool_ops spider_net_ethtool_ops = { 145struct ethtool_ops spider_net_ethtool_ops = {
134 .get_settings = spider_net_ethtool_get_settings, 146 .get_settings = spider_net_ethtool_get_settings,
135 .get_drvinfo = spider_net_ethtool_get_drvinfo, 147 .get_drvinfo = spider_net_ethtool_get_drvinfo,
@@ -141,5 +153,6 @@ struct ethtool_ops spider_net_ethtool_ops = {
141 .set_rx_csum = spider_net_ethtool_set_rx_csum, 153 .set_rx_csum = spider_net_ethtool_set_rx_csum,
142 .get_tx_csum = spider_net_ethtool_get_tx_csum, 154 .get_tx_csum = spider_net_ethtool_get_tx_csum,
143 .set_tx_csum = spider_net_ethtool_set_tx_csum, 155 .set_tx_csum = spider_net_ethtool_set_tx_csum,
156 .get_ringparam = spider_net_ethtool_get_ringparam,
144}; 157};
145 158
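For context, the get_ringparam hook added above is reached through the standard ETHTOOL_GRINGPARAM ioctl (the same path `ethtool -g <dev>` uses). The following is a minimal user-space sketch, illustrative only; the interface name "eth0" is an assumption, not part of this patch:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_ringparam ring = { .cmd = ETHTOOL_GRINGPARAM };
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* hypothetical device name */
	ifr.ifr_data = (char *)&ring;

	/* the kernel fills "ring" from the driver's get_ringparam handler */
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
		perror("ETHTOOL_GRINGPARAM");
		close(fd);
		return 1;
	}
	printf("rx %u/%u  tx %u/%u\n", ring.rx_pending, ring.rx_max_pending,
	       ring.tx_pending, ring.tx_max_pending);
	close(fd);
	return 0;
}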
diff --git a/drivers/net/sundance.c b/drivers/net/sundance.c
index ac17377b3e9..698568e751d 100644
--- a/drivers/net/sundance.c
+++ b/drivers/net/sundance.c
@@ -107,7 +107,7 @@ static char *media[MAX_UNITS];
107#endif 107#endif
108 108
109/* These identify the driver base version and may not be removed. */ 109/* These identify the driver base version and may not be removed. */
110static char version[] __devinitdata = 110static char version[] =
111KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker\n" 111KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker\n"
112KERN_INFO " http://www.scyld.com/network/sundance.html\n"; 112KERN_INFO " http://www.scyld.com/network/sundance.html\n";
113 113
diff --git a/drivers/net/sunlance.c b/drivers/net/sunlance.c
index 0e3fdf7c6dd..ec0413609f3 100644
--- a/drivers/net/sunlance.c
+++ b/drivers/net/sunlance.c
@@ -1566,20 +1566,21 @@ static int __exit sunlance_sun4_remove(void)
1566static int __devinit sunlance_sbus_probe(struct of_device *dev, const struct of_device_id *match) 1566static int __devinit sunlance_sbus_probe(struct of_device *dev, const struct of_device_id *match)
1567{ 1567{
1568 struct sbus_dev *sdev = to_sbus_device(&dev->dev); 1568 struct sbus_dev *sdev = to_sbus_device(&dev->dev);
1569 struct device_node *dp = dev->node;
1570 int err; 1569 int err;
1571 1570
1572 if (!strcmp(dp->name, "le")) { 1571 if (sdev->parent) {
1573 err = sparc_lance_probe_one(sdev, NULL, NULL); 1572 struct of_device *parent = &sdev->parent->ofdev;
1574 } else if (!strcmp(dp->name, "ledma")) {
1575 struct sbus_dma *ledma = find_ledma(sdev);
1576 1573
1577 err = sparc_lance_probe_one(sdev->child, ledma, NULL); 1574 if (!strcmp(parent->node->name, "ledma")) {
1578 } else { 1575 struct sbus_dma *ledma = find_ledma(to_sbus_device(&parent->dev));
1579 BUG_ON(strcmp(dp->name, "lebuffer"));
1580 1576
1581 err = sparc_lance_probe_one(sdev->child, NULL, sdev); 1577 err = sparc_lance_probe_one(sdev, ledma, NULL);
1582 } 1578 } else if (!strcmp(parent->node->name, "lebuffer")) {
1579 err = sparc_lance_probe_one(sdev, NULL, to_sbus_device(&parent->dev));
1580 } else
1581 err = sparc_lance_probe_one(sdev, NULL, NULL);
1582 } else
1583 err = sparc_lance_probe_one(sdev, NULL, NULL);
1583 1584
1584 return err; 1585 return err;
1585} 1586}
@@ -1604,12 +1605,6 @@ static struct of_device_id sunlance_sbus_match[] = {
1604 { 1605 {
1605 .name = "le", 1606 .name = "le",
1606 }, 1607 },
1607 {
1608 .name = "ledma",
1609 },
1610 {
1611 .name = "lebuffer",
1612 },
1613 {}, 1608 {},
1614}; 1609};
1615 1610
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 6f97962dd06..eafabb253f0 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -68,8 +68,8 @@
68 68
69#define DRV_MODULE_NAME "tg3" 69#define DRV_MODULE_NAME "tg3"
70#define PFX DRV_MODULE_NAME ": " 70#define PFX DRV_MODULE_NAME ": "
71#define DRV_MODULE_VERSION "3.64" 71#define DRV_MODULE_VERSION "3.65"
72#define DRV_MODULE_RELDATE "July 31, 2006" 72#define DRV_MODULE_RELDATE "August 07, 2006"
73 73
74#define TG3_DEF_MAC_MODE 0 74#define TG3_DEF_MAC_MODE 0
75#define TG3_DEF_RX_MODE 0 75#define TG3_DEF_RX_MODE 0
@@ -123,9 +123,6 @@
123 TG3_RX_RCB_RING_SIZE(tp)) 123 TG3_RX_RCB_RING_SIZE(tp))
124#define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \ 124#define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \
125 TG3_TX_RING_SIZE) 125 TG3_TX_RING_SIZE)
126#define TX_BUFFS_AVAIL(TP) \
127 ((TP)->tx_pending - \
128 (((TP)->tx_prod - (TP)->tx_cons) & (TG3_TX_RING_SIZE - 1)))
129#define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1)) 126#define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
130 127
131#define RX_PKT_BUF_SZ (1536 + tp->rx_offset + 64) 128#define RX_PKT_BUF_SZ (1536 + tp->rx_offset + 64)
@@ -2987,6 +2984,13 @@ static void tg3_tx_recover(struct tg3 *tp)
2987 spin_unlock(&tp->lock); 2984 spin_unlock(&tp->lock);
2988} 2985}
2989 2986
2987static inline u32 tg3_tx_avail(struct tg3 *tp)
2988{
2989 smp_mb();
2990 return (tp->tx_pending -
2991 ((tp->tx_prod - tp->tx_cons) & (TG3_TX_RING_SIZE - 1)));
2992}
2993
2990/* Tigon3 never reports partial packet sends. So we do not 2994/* Tigon3 never reports partial packet sends. So we do not
2991 * need special logic to handle SKBs that have not had all 2995 * need special logic to handle SKBs that have not had all
2992 * of their frags sent yet, like SunGEM does. 2996 * of their frags sent yet, like SunGEM does.
@@ -3038,12 +3042,20 @@ static void tg3_tx(struct tg3 *tp)
3038 3042
3039 tp->tx_cons = sw_idx; 3043 tp->tx_cons = sw_idx;
3040 3044
3041 if (unlikely(netif_queue_stopped(tp->dev))) { 3045 /* Need to make the tx_cons update visible to tg3_start_xmit()
3042 spin_lock(&tp->tx_lock); 3046 * before checking for netif_queue_stopped(). Without the
3047 * memory barrier, there is a small possibility that tg3_start_xmit()
3048 * will miss it and cause the queue to be stopped forever.
3049 */
3050 smp_mb();
3051
3052 if (unlikely(netif_queue_stopped(tp->dev) &&
3053 (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH))) {
3054 netif_tx_lock(tp->dev);
3043 if (netif_queue_stopped(tp->dev) && 3055 if (netif_queue_stopped(tp->dev) &&
3044 (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH)) 3056 (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH))
3045 netif_wake_queue(tp->dev); 3057 netif_wake_queue(tp->dev);
3046 spin_unlock(&tp->tx_lock); 3058 netif_tx_unlock(tp->dev);
3047 } 3059 }
3048} 3060}
3049 3061
@@ -3101,7 +3113,6 @@ static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
3101 if (skb == NULL) 3113 if (skb == NULL)
3102 return -ENOMEM; 3114 return -ENOMEM;
3103 3115
3104 skb->dev = tp->dev;
3105 skb_reserve(skb, tp->rx_offset); 3116 skb_reserve(skb, tp->rx_offset);
3106 3117
3107 mapping = pci_map_single(tp->pdev, skb->data, 3118 mapping = pci_map_single(tp->pdev, skb->data,
@@ -3274,7 +3285,6 @@ static int tg3_rx(struct tg3 *tp, int budget)
3274 if (copy_skb == NULL) 3285 if (copy_skb == NULL)
3275 goto drop_it_no_recycle; 3286 goto drop_it_no_recycle;
3276 3287
3277 copy_skb->dev = tp->dev;
3278 skb_reserve(copy_skb, 2); 3288 skb_reserve(copy_skb, 2);
3279 skb_put(copy_skb, len); 3289 skb_put(copy_skb, len);
3280 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE); 3290 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
@@ -3797,7 +3807,7 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
3797 * interrupt. Furthermore, IRQ processing runs lockless so we have 3807 * interrupt. Furthermore, IRQ processing runs lockless so we have
3798 * no IRQ context deadlocks to worry about either. Rejoice! 3808 * no IRQ context deadlocks to worry about either. Rejoice!
3799 */ 3809 */
3800 if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) { 3810 if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
3801 if (!netif_queue_stopped(dev)) { 3811 if (!netif_queue_stopped(dev)) {
3802 netif_stop_queue(dev); 3812 netif_stop_queue(dev);
3803 3813
@@ -3893,12 +3903,10 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
3893 tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry); 3903 tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
3894 3904
3895 tp->tx_prod = entry; 3905 tp->tx_prod = entry;
3896 if (unlikely(TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1))) { 3906 if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
3897 spin_lock(&tp->tx_lock);
3898 netif_stop_queue(dev); 3907 netif_stop_queue(dev);
3899 if (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH) 3908 if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH)
3900 netif_wake_queue(tp->dev); 3909 netif_wake_queue(tp->dev);
3901 spin_unlock(&tp->tx_lock);
3902 } 3910 }
3903 3911
3904out_unlock: 3912out_unlock:
@@ -3920,7 +3928,7 @@ static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
3920 struct sk_buff *segs, *nskb; 3928 struct sk_buff *segs, *nskb;
3921 3929
3922 /* Estimate the number of fragments in the worst case */ 3930 /* Estimate the number of fragments in the worst case */
3923 if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->gso_segs * 3))) { 3931 if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
3924 netif_stop_queue(tp->dev); 3932 netif_stop_queue(tp->dev);
3925 return NETDEV_TX_BUSY; 3933 return NETDEV_TX_BUSY;
3926 } 3934 }
@@ -3960,7 +3968,7 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
3960 * interrupt. Furthermore, IRQ processing runs lockless so we have 3968 * interrupt. Furthermore, IRQ processing runs lockless so we have
3961 * no IRQ context deadlocks to worry about either. Rejoice! 3969 * no IRQ context deadlocks to worry about either. Rejoice!
3962 */ 3970 */
3963 if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) { 3971 if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
3964 if (!netif_queue_stopped(dev)) { 3972 if (!netif_queue_stopped(dev)) {
3965 netif_stop_queue(dev); 3973 netif_stop_queue(dev);
3966 3974
@@ -4110,12 +4118,10 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
4110 tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry); 4118 tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
4111 4119
4112 tp->tx_prod = entry; 4120 tp->tx_prod = entry;
4113 if (unlikely(TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1))) { 4121 if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
4114 spin_lock(&tp->tx_lock);
4115 netif_stop_queue(dev); 4122 netif_stop_queue(dev);
4116 if (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH) 4123 if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH)
4117 netif_wake_queue(tp->dev); 4124 netif_wake_queue(tp->dev);
4118 spin_unlock(&tp->tx_lock);
4119 } 4125 }
4120 4126
4121out_unlock: 4127out_unlock:
@@ -11474,7 +11480,6 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
11474 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA; 11480 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
11475#endif 11481#endif
11476 spin_lock_init(&tp->lock); 11482 spin_lock_init(&tp->lock);
11477 spin_lock_init(&tp->tx_lock);
11478 spin_lock_init(&tp->indirect_lock); 11483 spin_lock_init(&tp->indirect_lock);
11479 INIT_WORK(&tp->reset_task, tg3_reset_task, tp); 11484 INIT_WORK(&tp->reset_task, tg3_reset_task, tp);
11480 11485
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index ba2c98711c8..3ecf356cfb0 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -2079,9 +2079,9 @@ struct tg3 {
2079 * lock: Held during reset, PHY access, timer, and when 2079 * lock: Held during reset, PHY access, timer, and when
2080 * updating tg3_flags and tg3_flags2. 2080 * updating tg3_flags and tg3_flags2.
2081 * 2081 *
2082 * tx_lock: Held during tg3_start_xmit and tg3_tx only 2082 * netif_tx_lock: Held during tg3_start_xmit. tg3_tx holds
2083 * when calling netif_[start|stop]_queue. 2083 * netif_tx_lock when it needs to call
2084 * tg3_start_xmit is protected by netif_tx_lock. 2084 * netif_wake_queue.
2085 * 2085 *
2086 * Both of these locks are to be held with BH safety. 2086 * Both of these locks are to be held with BH safety.
2087 * 2087 *
@@ -2118,8 +2118,6 @@ struct tg3 {
2118 u32 tx_cons; 2118 u32 tx_cons;
2119 u32 tx_pending; 2119 u32 tx_pending;
2120 2120
2121 spinlock_t tx_lock;
2122
2123 struct tg3_tx_buffer_desc *tx_ring; 2121 struct tg3_tx_buffer_desc *tx_ring;
2124 struct tx_ring_info *tx_buffers; 2122 struct tx_ring_info *tx_buffers;
2125 dma_addr_t tx_desc_mapping; 2123 dma_addr_t tx_desc_mapping;
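The tg3 change above replaces the TX_BUFFS_AVAIL() macro and the private tx_lock with tg3_tx_avail() plus netif_tx_lock, relying on memory barriers to keep the queue stop/wake decision race-free. Below is a toy, single-threaded sketch of that ring arithmetic and of where the ordering matters; it is not tg3 code, and real drivers use smp_mb() and the netdev queue API rather than C11 fences:

#include <stdatomic.h>
#include <stdio.h>

#define RING_SIZE    512		/* power of two, like TG3_TX_RING_SIZE */
#define WAKE_THRESH  (RING_SIZE / 4)

struct txq {
	unsigned int prod;	/* advanced by the transmit path */
	unsigned int cons;	/* advanced by the completion path */
	int stopped;		/* stands in for netif_queue_stopped() */
};

/* free descriptors, derived from the index difference modulo ring size */
static unsigned int tx_avail(struct txq *q)
{
	atomic_thread_fence(memory_order_seq_cst);	/* smp_mb() in tg3_tx_avail() */
	return RING_SIZE - ((q->prod - q->cons) & (RING_SIZE - 1));
}

static void tx_complete(struct txq *q, unsigned int done)
{
	q->cons += done;
	/* order the cons update before re-reading "stopped", as the comment
	 * added in tg3_tx() explains; without it the producer can stop the
	 * queue just after this check and nobody ever wakes it again */
	atomic_thread_fence(memory_order_seq_cst);
	if (q->stopped && tx_avail(q) > WAKE_THRESH)
		q->stopped = 0;			/* netif_wake_queue() in the driver */
}

int main(void)
{
	struct txq q = { .prod = 600, .cons = 100, .stopped = 1 };

	printf("before: avail=%u\n", tx_avail(&q));	/* 12 free slots */
	tx_complete(&q, 400);
	printf("after : avail=%u stopped=%d\n", tx_avail(&q), q.stopped);
	return 0;
}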
diff --git a/drivers/net/tokenring/ibmtr.c b/drivers/net/tokenring/ibmtr.c
index 9f491563944..4470025ff7f 100644
--- a/drivers/net/tokenring/ibmtr.c
+++ b/drivers/net/tokenring/ibmtr.c
@@ -140,7 +140,7 @@ in the event that chatty debug messages are desired - jjs 12/30/98 */
140 140
141/* version and credits */ 141/* version and credits */
142#ifndef PCMCIA 142#ifndef PCMCIA
143static char version[] __initdata = 143static char version[] __devinitdata =
144 "\nibmtr.c: v1.3.57 8/ 7/94 Peter De Schrijver and Mark Swanson\n" 144 "\nibmtr.c: v1.3.57 8/ 7/94 Peter De Schrijver and Mark Swanson\n"
145 " v2.1.125 10/20/98 Paul Norton <pnorton@ieee.org>\n" 145 " v2.1.125 10/20/98 Paul Norton <pnorton@ieee.org>\n"
146 " v2.2.0 12/30/98 Joel Sloan <jjs@c-me.com>\n" 146 " v2.2.0 12/30/98 Joel Sloan <jjs@c-me.com>\n"
@@ -216,7 +216,7 @@ static int __devinitdata turbo_irq[IBMTR_MAX_ADAPTERS] = {0};
216static int __devinitdata turbo_searched = 0; 216static int __devinitdata turbo_searched = 0;
217 217
218#ifndef PCMCIA 218#ifndef PCMCIA
219static __u32 ibmtr_mem_base __initdata = 0xd0000; 219static __u32 ibmtr_mem_base __devinitdata = 0xd0000;
220#endif 220#endif
221 221
222static void __devinit PrtChanID(char *pcid, short stride) 222static void __devinit PrtChanID(char *pcid, short stride)
diff --git a/drivers/net/tokenring/smctr.c b/drivers/net/tokenring/smctr.c
index cd2e0251e2b..85a7f797d34 100644
--- a/drivers/net/tokenring/smctr.c
+++ b/drivers/net/tokenring/smctr.c
@@ -5666,7 +5666,7 @@ module_param_array(io, int, NULL, 0);
5666module_param_array(irq, int, NULL, 0); 5666module_param_array(irq, int, NULL, 0);
5667module_param(ringspeed, int, 0); 5667module_param(ringspeed, int, 0);
5668 5668
5669static struct net_device *setup_card(int n) 5669static struct net_device * __init setup_card(int n)
5670{ 5670{
5671 struct net_device *dev = alloc_trdev(sizeof(struct net_local)); 5671 struct net_device *dev = alloc_trdev(sizeof(struct net_local));
5672 int err; 5672 int err;
@@ -5696,9 +5696,8 @@ out:
5696 free_netdev(dev); 5696 free_netdev(dev);
5697 return ERR_PTR(err); 5697 return ERR_PTR(err);
5698} 5698}
5699
5700 5699
5701int init_module(void) 5700int __init init_module(void)
5702{ 5701{
5703 int i, found = 0; 5702 int i, found = 0;
5704 struct net_device *dev; 5703 struct net_device *dev;
diff --git a/drivers/net/tulip/winbond-840.c b/drivers/net/tulip/winbond-840.c
index 7f414815cc6..eba9083da14 100644
--- a/drivers/net/tulip/winbond-840.c
+++ b/drivers/net/tulip/winbond-840.c
@@ -138,7 +138,7 @@ static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
138#include <asm/irq.h> 138#include <asm/irq.h>
139 139
140/* These identify the driver base version and may not be removed. */ 140/* These identify the driver base version and may not be removed. */
141static char version[] __devinitdata = 141static char version[] =
142KERN_INFO DRV_NAME ".c:v" DRV_VERSION " (2.4 port) " DRV_RELDATE " Donald Becker <becker@scyld.com>\n" 142KERN_INFO DRV_NAME ".c:v" DRV_VERSION " (2.4 port) " DRV_RELDATE " Donald Becker <becker@scyld.com>\n"
143KERN_INFO " http://www.scyld.com/network/drivers.html\n"; 143KERN_INFO " http://www.scyld.com/network/drivers.html\n";
144 144
diff --git a/drivers/net/tulip/xircom_cb.c b/drivers/net/tulip/xircom_cb.c
index f874e4f6ccf..cf43390d2c8 100644
--- a/drivers/net/tulip/xircom_cb.c
+++ b/drivers/net/tulip/xircom_cb.c
@@ -1264,8 +1264,7 @@ static void investigate_write_descriptor(struct net_device *dev, struct xircom_p
1264 1264
1265static int __init xircom_init(void) 1265static int __init xircom_init(void)
1266{ 1266{
1267 pci_register_driver(&xircom_ops); 1267 return pci_register_driver(&xircom_ops);
1268 return 0;
1269} 1268}
1270 1269
1271static void __exit xircom_exit(void) 1270static void __exit xircom_exit(void)
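The xircom_cb fix above follows a general idiom: module init should return the pci_register_driver() result so a registration failure propagates to modprobe instead of being silently dropped. A minimal sketch of that shape, using a hypothetical "foo" driver (the names and empty id table are assumptions, not xircom code):

#include <linux/module.h>
#include <linux/pci.h>

static struct pci_device_id foo_ids[] = { { } };	/* empty table: binds nothing */

static struct pci_driver foo_driver = {
	.name     = "foo",
	.id_table = foo_ids,
};

static int __init foo_init(void)
{
	/* propagate any registration error instead of returning 0 */
	return pci_register_driver(&foo_driver);
}

static void __exit foo_exit(void)
{
	pci_unregister_driver(&foo_driver);
}

module_init(foo_init);
module_exit(foo_exit);
MODULE_LICENSE("GPL");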
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c
new file mode 100644
index 00000000000..47f49ef72bd
--- /dev/null
+++ b/drivers/net/ucc_geth.c
@@ -0,0 +1,4278 @@
1/*
  2 * Copyright (C) Freescale Semiconductor, Inc. 2006. All rights reserved.
3 *
4 * Author: Shlomi Gridish <gridish@freescale.com>
5 *
6 * Description:
7 * QE UCC Gigabit Ethernet Driver
8 *
9 * Changelog:
10 * Jul 6, 2006 Li Yang <LeoLi@freescale.com>
11 * - Rearrange code and style fixes
12 *
13 * This program is free software; you can redistribute it and/or modify it
14 * under the terms of the GNU General Public License as published by the
15 * Free Software Foundation; either version 2 of the License, or (at your
16 * option) any later version.
17 */
18#include <linux/kernel.h>
19#include <linux/init.h>
20#include <linux/errno.h>
21#include <linux/slab.h>
22#include <linux/stddef.h>
23#include <linux/interrupt.h>
24#include <linux/netdevice.h>
25#include <linux/etherdevice.h>
26#include <linux/skbuff.h>
27#include <linux/spinlock.h>
28#include <linux/mm.h>
29#include <linux/ethtool.h>
30#include <linux/delay.h>
31#include <linux/dma-mapping.h>
32#include <linux/fsl_devices.h>
33#include <linux/ethtool.h>
34#include <linux/platform_device.h>
35#include <linux/mii.h>
36
37#include <asm/uaccess.h>
38#include <asm/irq.h>
39#include <asm/io.h>
40#include <asm/immap_qe.h>
41#include <asm/qe.h>
42#include <asm/ucc.h>
43#include <asm/ucc_fast.h>
44
45#include "ucc_geth.h"
46#include "ucc_geth_phy.h"
47
48#undef DEBUG
49
 50#define DRV_DESC "QE UCC Gigabit Ethernet Controller version: June 20, 2006"
51#define DRV_NAME "ucc_geth"
52
53#define ugeth_printk(level, format, arg...) \
54 printk(level format "\n", ## arg)
55
56#define ugeth_dbg(format, arg...) \
57 ugeth_printk(KERN_DEBUG , format , ## arg)
58#define ugeth_err(format, arg...) \
59 ugeth_printk(KERN_ERR , format , ## arg)
60#define ugeth_info(format, arg...) \
61 ugeth_printk(KERN_INFO , format , ## arg)
62#define ugeth_warn(format, arg...) \
63 ugeth_printk(KERN_WARNING , format , ## arg)
64
65#ifdef UGETH_VERBOSE_DEBUG
66#define ugeth_vdbg ugeth_dbg
67#else
68#define ugeth_vdbg(fmt, args...) do { } while (0)
69#endif /* UGETH_VERBOSE_DEBUG */
70
71static DEFINE_SPINLOCK(ugeth_lock);
72
73static ucc_geth_info_t ugeth_primary_info = {
74 .uf_info = {
75 .bd_mem_part = MEM_PART_SYSTEM,
76 .rtsm = UCC_FAST_SEND_IDLES_BETWEEN_FRAMES,
77 .max_rx_buf_length = 1536,
78/* FIXME: should be changed in run time for 1G and 100M */
79#ifdef CONFIG_UGETH_HAS_GIGA
80 .urfs = UCC_GETH_URFS_GIGA_INIT,
81 .urfet = UCC_GETH_URFET_GIGA_INIT,
82 .urfset = UCC_GETH_URFSET_GIGA_INIT,
83 .utfs = UCC_GETH_UTFS_GIGA_INIT,
84 .utfet = UCC_GETH_UTFET_GIGA_INIT,
85 .utftt = UCC_GETH_UTFTT_GIGA_INIT,
86#else
87 .urfs = UCC_GETH_URFS_INIT,
88 .urfet = UCC_GETH_URFET_INIT,
89 .urfset = UCC_GETH_URFSET_INIT,
90 .utfs = UCC_GETH_UTFS_INIT,
91 .utfet = UCC_GETH_UTFET_INIT,
92 .utftt = UCC_GETH_UTFTT_INIT,
93#endif
94 .ufpt = 256,
95 .mode = UCC_FAST_PROTOCOL_MODE_ETHERNET,
96 .ttx_trx = UCC_FAST_GUMR_TRANSPARENT_TTX_TRX_NORMAL,
97 .tenc = UCC_FAST_TX_ENCODING_NRZ,
98 .renc = UCC_FAST_RX_ENCODING_NRZ,
99 .tcrc = UCC_FAST_16_BIT_CRC,
100 .synl = UCC_FAST_SYNC_LEN_NOT_USED,
101 },
102 .numQueuesTx = 1,
103 .numQueuesRx = 1,
104 .extendedFilteringChainPointer = ((uint32_t) NULL),
105 .typeorlen = 3072 /*1536 */ ,
106 .nonBackToBackIfgPart1 = 0x40,
107 .nonBackToBackIfgPart2 = 0x60,
108 .miminumInterFrameGapEnforcement = 0x50,
109 .backToBackInterFrameGap = 0x60,
110 .mblinterval = 128,
111 .nortsrbytetime = 5,
112 .fracsiz = 1,
113 .strictpriorityq = 0xff,
114 .altBebTruncation = 0xa,
115 .excessDefer = 1,
116 .maxRetransmission = 0xf,
117 .collisionWindow = 0x37,
118 .receiveFlowControl = 1,
119 .maxGroupAddrInHash = 4,
120 .maxIndAddrInHash = 4,
121 .prel = 7,
122 .maxFrameLength = 1518,
123 .minFrameLength = 64,
124 .maxD1Length = 1520,
125 .maxD2Length = 1520,
126 .vlantype = 0x8100,
127 .ecamptr = ((uint32_t) NULL),
128 .eventRegMask = UCCE_OTHER,
129 .pausePeriod = 0xf000,
130 .interruptcoalescingmaxvalue = {1, 1, 1, 1, 1, 1, 1, 1},
131 .bdRingLenTx = {
132 TX_BD_RING_LEN,
133 TX_BD_RING_LEN,
134 TX_BD_RING_LEN,
135 TX_BD_RING_LEN,
136 TX_BD_RING_LEN,
137 TX_BD_RING_LEN,
138 TX_BD_RING_LEN,
139 TX_BD_RING_LEN},
140
141 .bdRingLenRx = {
142 RX_BD_RING_LEN,
143 RX_BD_RING_LEN,
144 RX_BD_RING_LEN,
145 RX_BD_RING_LEN,
146 RX_BD_RING_LEN,
147 RX_BD_RING_LEN,
148 RX_BD_RING_LEN,
149 RX_BD_RING_LEN},
150
151 .numStationAddresses = UCC_GETH_NUM_OF_STATION_ADDRESSES_1,
152 .largestexternallookupkeysize =
153 QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_NONE,
154 .statisticsMode = UCC_GETH_STATISTICS_GATHERING_MODE_NONE,
155 .vlanOperationTagged = UCC_GETH_VLAN_OPERATION_TAGGED_NOP,
156 .vlanOperationNonTagged = UCC_GETH_VLAN_OPERATION_NON_TAGGED_NOP,
157 .rxQoSMode = UCC_GETH_QOS_MODE_DEFAULT,
158 .aufc = UPSMR_AUTOMATIC_FLOW_CONTROL_MODE_NONE,
159 .padAndCrc = MACCFG2_PAD_AND_CRC_MODE_PAD_AND_CRC,
160 .numThreadsTx = UCC_GETH_NUM_OF_THREADS_4,
161 .numThreadsRx = UCC_GETH_NUM_OF_THREADS_4,
162 .riscTx = QE_RISC_ALLOCATION_RISC1_AND_RISC2,
163 .riscRx = QE_RISC_ALLOCATION_RISC1_AND_RISC2,
164};
165
166static ucc_geth_info_t ugeth_info[8];
167
168#ifdef DEBUG
169static void mem_disp(u8 *addr, int size)
170{
171 u8 *i;
172 int size16Aling = (size >> 4) << 4;
173 int size4Aling = (size >> 2) << 2;
174 int notAlign = 0;
175 if (size % 16)
176 notAlign = 1;
177
178 for (i = addr; (u32) i < (u32) addr + size16Aling; i += 16)
179 printk("0x%08x: %08x %08x %08x %08x\r\n",
180 (u32) i,
181 *((u32 *) (i)),
182 *((u32 *) (i + 4)),
183 *((u32 *) (i + 8)), *((u32 *) (i + 12)));
184 if (notAlign == 1)
185 printk("0x%08x: ", (u32) i);
186 for (; (u32) i < (u32) addr + size4Aling; i += 4)
187 printk("%08x ", *((u32 *) (i)));
188 for (; (u32) i < (u32) addr + size; i++)
189 printk("%02x", *((u8 *) (i)));
190 if (notAlign == 1)
191 printk("\r\n");
192}
193#endif /* DEBUG */
194
195#ifdef CONFIG_UGETH_FILTERING
196static void enqueue(struct list_head *node, struct list_head *lh)
197{
198 unsigned long flags;
199
200 spin_lock_irqsave(ugeth_lock, flags);
201 list_add_tail(node, lh);
202 spin_unlock_irqrestore(ugeth_lock, flags);
203}
204#endif /* CONFIG_UGETH_FILTERING */
205
206static struct list_head *dequeue(struct list_head *lh)
207{
208 unsigned long flags;
209
210 spin_lock_irqsave(ugeth_lock, flags);
211 if (!list_empty(lh)) {
212 struct list_head *node = lh->next;
213 list_del(node);
214 spin_unlock_irqrestore(ugeth_lock, flags);
215 return node;
216 } else {
217 spin_unlock_irqrestore(ugeth_lock, flags);
218 return NULL;
219 }
220}
221
222static int get_interface_details(enet_interface_e enet_interface,
223 enet_speed_e *speed,
224 int *r10m,
225 int *rmm,
226 int *rpm,
227 int *tbi, int *limited_to_full_duplex)
228{
229 /* Analyze enet_interface according to Interface Mode
230 Configuration table */
231 switch (enet_interface) {
232 case ENET_10_MII:
233 *speed = ENET_SPEED_10BT;
234 break;
235 case ENET_10_RMII:
236 *speed = ENET_SPEED_10BT;
237 *r10m = 1;
238 *rmm = 1;
239 break;
240 case ENET_10_RGMII:
241 *speed = ENET_SPEED_10BT;
242 *rpm = 1;
243 *r10m = 1;
244 *limited_to_full_duplex = 1;
245 break;
246 case ENET_100_MII:
247 *speed = ENET_SPEED_100BT;
248 break;
249 case ENET_100_RMII:
250 *speed = ENET_SPEED_100BT;
251 *rmm = 1;
252 break;
253 case ENET_100_RGMII:
254 *speed = ENET_SPEED_100BT;
255 *rpm = 1;
256 *limited_to_full_duplex = 1;
257 break;
258 case ENET_1000_GMII:
259 *speed = ENET_SPEED_1000BT;
260 *limited_to_full_duplex = 1;
261 break;
262 case ENET_1000_RGMII:
263 *speed = ENET_SPEED_1000BT;
264 *rpm = 1;
265 *limited_to_full_duplex = 1;
266 break;
267 case ENET_1000_TBI:
268 *speed = ENET_SPEED_1000BT;
269 *tbi = 1;
270 *limited_to_full_duplex = 1;
271 break;
272 case ENET_1000_RTBI:
273 *speed = ENET_SPEED_1000BT;
274 *rpm = 1;
275 *tbi = 1;
276 *limited_to_full_duplex = 1;
277 break;
278 default:
279 return -EINVAL;
280 break;
281 }
282
283 return 0;
284}
285
286static struct sk_buff *get_new_skb(ucc_geth_private_t *ugeth, u8 *bd)
287{
288 struct sk_buff *skb = NULL;
289
290 skb = dev_alloc_skb(ugeth->ug_info->uf_info.max_rx_buf_length +
291 UCC_GETH_RX_DATA_BUF_ALIGNMENT);
292
293 if (skb == NULL)
294 return NULL;
295
296 /* We need the data buffer to be aligned properly. We will reserve
297 * as many bytes as needed to align the data properly
298 */
299 skb_reserve(skb,
300 UCC_GETH_RX_DATA_BUF_ALIGNMENT -
301 (((unsigned)skb->data) & (UCC_GETH_RX_DATA_BUF_ALIGNMENT -
302 1)));
303
304 skb->dev = ugeth->dev;
305
306 BD_BUFFER_SET(bd,
307 dma_map_single(NULL,
308 skb->data,
309 ugeth->ug_info->uf_info.max_rx_buf_length +
310 UCC_GETH_RX_DATA_BUF_ALIGNMENT,
311 DMA_FROM_DEVICE));
312
313 BD_STATUS_AND_LENGTH_SET(bd,
314 (R_E | R_I |
315 (BD_STATUS_AND_LENGTH(bd) & R_W)));
316
317 return skb;
318}
319
320static int rx_bd_buffer_set(ucc_geth_private_t *ugeth, u8 rxQ)
321{
322 u8 *bd;
323 u32 bd_status;
324 struct sk_buff *skb;
325 int i;
326
327 bd = ugeth->p_rx_bd_ring[rxQ];
328 i = 0;
329
330 do {
331 bd_status = BD_STATUS_AND_LENGTH(bd);
332 skb = get_new_skb(ugeth, bd);
333
334 if (!skb) /* If we cannot allocate a data buffer,
335 abort; cleanup will be done elsewhere */
336 return -ENOMEM;
337
338 ugeth->rx_skbuff[rxQ][i] = skb;
339
340 /* advance the BD pointer */
341 bd += UCC_GETH_SIZE_OF_BD;
342 i++;
343 } while (!(bd_status & R_W));
344
345 return 0;
346}
347
348static int fill_init_enet_entries(ucc_geth_private_t *ugeth,
349 volatile u32 *p_start,
350 u8 num_entries,
351 u32 thread_size,
352 u32 thread_alignment,
353 qe_risc_allocation_e risc,
354 int skip_page_for_first_entry)
355{
356 u32 init_enet_offset;
357 u8 i;
358 int snum;
359
360 for (i = 0; i < num_entries; i++) {
361 if ((snum = qe_get_snum()) < 0) {
362 ugeth_err("fill_init_enet_entries: Can not get SNUM.");
363 return snum;
364 }
365 if ((i == 0) && skip_page_for_first_entry)
366 /* First entry of Rx does not have page */
367 init_enet_offset = 0;
368 else {
369 init_enet_offset =
370 qe_muram_alloc(thread_size, thread_alignment);
371 if (IS_MURAM_ERR(init_enet_offset)) {
372 ugeth_err
373 ("fill_init_enet_entries: Can not allocate DPRAM memory.");
374 qe_put_snum((u8) snum);
375 return -ENOMEM;
376 }
377 }
378 *(p_start++) =
379 ((u8) snum << ENET_INIT_PARAM_SNUM_SHIFT) | init_enet_offset
380 | risc;
381 }
382
383 return 0;
384}
385
386static int return_init_enet_entries(ucc_geth_private_t *ugeth,
387 volatile u32 *p_start,
388 u8 num_entries,
389 qe_risc_allocation_e risc,
390 int skip_page_for_first_entry)
391{
392 u32 init_enet_offset;
393 u8 i;
394 int snum;
395
396 for (i = 0; i < num_entries; i++) {
397 /* Check that this entry was actually valid --
398 needed in case failed in allocations */
399 if ((*p_start & ENET_INIT_PARAM_RISC_MASK) == risc) {
400 snum =
401 (u32) (*p_start & ENET_INIT_PARAM_SNUM_MASK) >>
402 ENET_INIT_PARAM_SNUM_SHIFT;
403 qe_put_snum((u8) snum);
404 if (!((i == 0) && skip_page_for_first_entry)) {
405 /* First entry of Rx does not have page */
406 init_enet_offset =
407 (in_be32(p_start) &
408 ENET_INIT_PARAM_PTR_MASK);
409 qe_muram_free(init_enet_offset);
410 }
411 *(p_start++) = 0; /* Just for cosmetics */
412 }
413 }
414
415 return 0;
416}
417
418#ifdef DEBUG
419static int dump_init_enet_entries(ucc_geth_private_t *ugeth,
420 volatile u32 *p_start,
421 u8 num_entries,
422 u32 thread_size,
423 qe_risc_allocation_e risc,
424 int skip_page_for_first_entry)
425{
426 u32 init_enet_offset;
427 u8 i;
428 int snum;
429
430 for (i = 0; i < num_entries; i++) {
431 /* Check that this entry was actually valid --
432 needed in case failed in allocations */
433 if ((*p_start & ENET_INIT_PARAM_RISC_MASK) == risc) {
434 snum =
435 (u32) (*p_start & ENET_INIT_PARAM_SNUM_MASK) >>
436 ENET_INIT_PARAM_SNUM_SHIFT;
437 qe_put_snum((u8) snum);
438 if (!((i == 0) && skip_page_for_first_entry)) {
439 /* First entry of Rx does not have page */
440 init_enet_offset =
441 (in_be32(p_start) &
442 ENET_INIT_PARAM_PTR_MASK);
443 ugeth_info("Init enet entry %d:", i);
444 ugeth_info("Base address: 0x%08x",
445 (u32)
446 qe_muram_addr(init_enet_offset));
447 mem_disp(qe_muram_addr(init_enet_offset),
448 thread_size);
449 }
450 p_start++;
451 }
452 }
453
454 return 0;
455}
456#endif
457
458#ifdef CONFIG_UGETH_FILTERING
459static enet_addr_container_t *get_enet_addr_container(void)
460{
461 enet_addr_container_t *enet_addr_cont;
462
463 /* allocate memory */
464 enet_addr_cont = kmalloc(sizeof(enet_addr_container_t), GFP_KERNEL);
465 if (!enet_addr_cont) {
466 ugeth_err("%s: No memory for enet_addr_container_t object.",
467 __FUNCTION__);
468 return NULL;
469 }
470
471 return enet_addr_cont;
472}
473#endif /* CONFIG_UGETH_FILTERING */
474
475static void put_enet_addr_container(enet_addr_container_t *enet_addr_cont)
476{
477 kfree(enet_addr_cont);
478}
479
480#ifdef CONFIG_UGETH_FILTERING
481static int hw_add_addr_in_paddr(ucc_geth_private_t *ugeth,
482 enet_addr_t *p_enet_addr, u8 paddr_num)
483{
484 ucc_geth_82xx_address_filtering_pram_t *p_82xx_addr_filt;
485
486 if (!(paddr_num < NUM_OF_PADDRS)) {
487 ugeth_warn("%s: Illegal paddr_num.", __FUNCTION__);
488 return -EINVAL;
489 }
490
491 p_82xx_addr_filt =
492 (ucc_geth_82xx_address_filtering_pram_t *) ugeth->p_rx_glbl_pram->
493 addressfiltering;
494
495 /* Ethernet frames are defined in Little Endian mode, */
496 /* therefore to insert the address we reverse the bytes. */
497 out_be16(&p_82xx_addr_filt->paddr[paddr_num].h,
498 (u16) (((u16) (((u16) ((*p_enet_addr)[5])) << 8)) |
499 (u16) (*p_enet_addr)[4]));
500 out_be16(&p_82xx_addr_filt->paddr[paddr_num].m,
501 (u16) (((u16) (((u16) ((*p_enet_addr)[3])) << 8)) |
502 (u16) (*p_enet_addr)[2]));
503 out_be16(&p_82xx_addr_filt->paddr[paddr_num].l,
504 (u16) (((u16) (((u16) ((*p_enet_addr)[1])) << 8)) |
505 (u16) (*p_enet_addr)[0]));
506
507 return 0;
508}
509#endif /* CONFIG_UGETH_FILTERING */
510
511static int hw_clear_addr_in_paddr(ucc_geth_private_t *ugeth, u8 paddr_num)
512{
513 ucc_geth_82xx_address_filtering_pram_t *p_82xx_addr_filt;
514
515 if (!(paddr_num < NUM_OF_PADDRS)) {
516 ugeth_warn("%s: Illegal paddr_num.", __FUNCTION__);
517 return -EINVAL;
518 }
519
520 p_82xx_addr_filt =
521 (ucc_geth_82xx_address_filtering_pram_t *) ugeth->p_rx_glbl_pram->
522 addressfiltering;
523
524 /* Writing address ff.ff.ff.ff.ff.ff disables address
525 recognition for this register */
526 out_be16(&p_82xx_addr_filt->paddr[paddr_num].h, 0xffff);
527 out_be16(&p_82xx_addr_filt->paddr[paddr_num].m, 0xffff);
528 out_be16(&p_82xx_addr_filt->paddr[paddr_num].l, 0xffff);
529
530 return 0;
531}
532
533static void hw_add_addr_in_hash(ucc_geth_private_t *ugeth,
534 enet_addr_t *p_enet_addr)
535{
536 ucc_geth_82xx_address_filtering_pram_t *p_82xx_addr_filt;
537 u32 cecr_subblock;
538
539 p_82xx_addr_filt =
540 (ucc_geth_82xx_address_filtering_pram_t *) ugeth->p_rx_glbl_pram->
541 addressfiltering;
542
543 cecr_subblock =
544 ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);
545
546 /* Ethernet frames are defined in Little Endian mode,
547 therefore to insert */
548 /* the address to the hash (Big Endian mode), we reverse the bytes.*/
549 out_be16(&p_82xx_addr_filt->taddr.h,
550 (u16) (((u16) (((u16) ((*p_enet_addr)[5])) << 8)) |
551 (u16) (*p_enet_addr)[4]));
552 out_be16(&p_82xx_addr_filt->taddr.m,
553 (u16) (((u16) (((u16) ((*p_enet_addr)[3])) << 8)) |
554 (u16) (*p_enet_addr)[2]));
555 out_be16(&p_82xx_addr_filt->taddr.l,
556 (u16) (((u16) (((u16) ((*p_enet_addr)[1])) << 8)) |
557 (u16) (*p_enet_addr)[0]));
558
559 qe_issue_cmd(QE_SET_GROUP_ADDRESS, cecr_subblock,
560 (u8) QE_CR_PROTOCOL_ETHERNET, 0);
561}
562
563#ifdef CONFIG_UGETH_MAGIC_PACKET
564static void magic_packet_detection_enable(ucc_geth_private_t *ugeth)
565{
566 ucc_fast_private_t *uccf;
567 ucc_geth_t *ug_regs;
568 u32 maccfg2, uccm;
569
570 uccf = ugeth->uccf;
571 ug_regs = ugeth->ug_regs;
572
573 /* Enable interrupts for magic packet detection */
574 uccm = in_be32(uccf->p_uccm);
575 uccm |= UCCE_MPD;
576 out_be32(uccf->p_uccm, uccm);
577
578 /* Enable magic packet detection */
579 maccfg2 = in_be32(&ug_regs->maccfg2);
580 maccfg2 |= MACCFG2_MPE;
581 out_be32(&ug_regs->maccfg2, maccfg2);
582}
583
584static void magic_packet_detection_disable(ucc_geth_private_t *ugeth)
585{
586 ucc_fast_private_t *uccf;
587 ucc_geth_t *ug_regs;
588 u32 maccfg2, uccm;
589
590 uccf = ugeth->uccf;
591 ug_regs = ugeth->ug_regs;
592
593 /* Disable interrupts for magic packet detection */
594 uccm = in_be32(uccf->p_uccm);
595 uccm &= ~UCCE_MPD;
596 out_be32(uccf->p_uccm, uccm);
597
598 /* Disable magic packet detection */
599 maccfg2 = in_be32(&ug_regs->maccfg2);
600 maccfg2 &= ~MACCFG2_MPE;
601 out_be32(&ug_regs->maccfg2, maccfg2);
602}
603#endif /* MAGIC_PACKET */
604
605static inline int compare_addr(enet_addr_t *addr1, enet_addr_t *addr2)
606{
607 return memcmp(addr1, addr2, ENET_NUM_OCTETS_PER_ADDRESS);
608}
609
610#ifdef DEBUG
611static void get_statistics(ucc_geth_private_t *ugeth,
612 ucc_geth_tx_firmware_statistics_t *
613 tx_firmware_statistics,
614 ucc_geth_rx_firmware_statistics_t *
615 rx_firmware_statistics,
616 ucc_geth_hardware_statistics_t *hardware_statistics)
617{
618 ucc_fast_t *uf_regs;
619 ucc_geth_t *ug_regs;
620 ucc_geth_tx_firmware_statistics_pram_t *p_tx_fw_statistics_pram;
621 ucc_geth_rx_firmware_statistics_pram_t *p_rx_fw_statistics_pram;
622
623 ug_regs = ugeth->ug_regs;
624 uf_regs = (ucc_fast_t *) ug_regs;
625 p_tx_fw_statistics_pram = ugeth->p_tx_fw_statistics_pram;
626 p_rx_fw_statistics_pram = ugeth->p_rx_fw_statistics_pram;
627
628 /* Tx firmware only if user handed pointer and driver actually
629 gathers Tx firmware statistics */
630 if (tx_firmware_statistics && p_tx_fw_statistics_pram) {
631 tx_firmware_statistics->sicoltx =
632 in_be32(&p_tx_fw_statistics_pram->sicoltx);
633 tx_firmware_statistics->mulcoltx =
634 in_be32(&p_tx_fw_statistics_pram->mulcoltx);
635 tx_firmware_statistics->latecoltxfr =
636 in_be32(&p_tx_fw_statistics_pram->latecoltxfr);
637 tx_firmware_statistics->frabortduecol =
638 in_be32(&p_tx_fw_statistics_pram->frabortduecol);
639 tx_firmware_statistics->frlostinmactxer =
640 in_be32(&p_tx_fw_statistics_pram->frlostinmactxer);
641 tx_firmware_statistics->carriersenseertx =
642 in_be32(&p_tx_fw_statistics_pram->carriersenseertx);
643 tx_firmware_statistics->frtxok =
644 in_be32(&p_tx_fw_statistics_pram->frtxok);
645 tx_firmware_statistics->txfrexcessivedefer =
646 in_be32(&p_tx_fw_statistics_pram->txfrexcessivedefer);
647 tx_firmware_statistics->txpkts256 =
648 in_be32(&p_tx_fw_statistics_pram->txpkts256);
649 tx_firmware_statistics->txpkts512 =
650 in_be32(&p_tx_fw_statistics_pram->txpkts512);
651 tx_firmware_statistics->txpkts1024 =
652 in_be32(&p_tx_fw_statistics_pram->txpkts1024);
653 tx_firmware_statistics->txpktsjumbo =
654 in_be32(&p_tx_fw_statistics_pram->txpktsjumbo);
655 }
656
657 /* Rx firmware only if user handed pointer and driver actually
658 * gathers Rx firmware statistics */
659 if (rx_firmware_statistics && p_rx_fw_statistics_pram) {
660 int i;
661 rx_firmware_statistics->frrxfcser =
662 in_be32(&p_rx_fw_statistics_pram->frrxfcser);
663 rx_firmware_statistics->fraligner =
664 in_be32(&p_rx_fw_statistics_pram->fraligner);
665 rx_firmware_statistics->inrangelenrxer =
666 in_be32(&p_rx_fw_statistics_pram->inrangelenrxer);
667 rx_firmware_statistics->outrangelenrxer =
668 in_be32(&p_rx_fw_statistics_pram->outrangelenrxer);
669 rx_firmware_statistics->frtoolong =
670 in_be32(&p_rx_fw_statistics_pram->frtoolong);
671 rx_firmware_statistics->runt =
672 in_be32(&p_rx_fw_statistics_pram->runt);
673 rx_firmware_statistics->verylongevent =
674 in_be32(&p_rx_fw_statistics_pram->verylongevent);
675 rx_firmware_statistics->symbolerror =
676 in_be32(&p_rx_fw_statistics_pram->symbolerror);
677 rx_firmware_statistics->dropbsy =
678 in_be32(&p_rx_fw_statistics_pram->dropbsy);
679 for (i = 0; i < 0x8; i++)
680 rx_firmware_statistics->res0[i] =
681 p_rx_fw_statistics_pram->res0[i];
682 rx_firmware_statistics->mismatchdrop =
683 in_be32(&p_rx_fw_statistics_pram->mismatchdrop);
684 rx_firmware_statistics->underpkts =
685 in_be32(&p_rx_fw_statistics_pram->underpkts);
686 rx_firmware_statistics->pkts256 =
687 in_be32(&p_rx_fw_statistics_pram->pkts256);
688 rx_firmware_statistics->pkts512 =
689 in_be32(&p_rx_fw_statistics_pram->pkts512);
690 rx_firmware_statistics->pkts1024 =
691 in_be32(&p_rx_fw_statistics_pram->pkts1024);
692 rx_firmware_statistics->pktsjumbo =
693 in_be32(&p_rx_fw_statistics_pram->pktsjumbo);
694 rx_firmware_statistics->frlossinmacer =
695 in_be32(&p_rx_fw_statistics_pram->frlossinmacer);
696 rx_firmware_statistics->pausefr =
697 in_be32(&p_rx_fw_statistics_pram->pausefr);
698 for (i = 0; i < 0x4; i++)
699 rx_firmware_statistics->res1[i] =
700 p_rx_fw_statistics_pram->res1[i];
701 rx_firmware_statistics->removevlan =
702 in_be32(&p_rx_fw_statistics_pram->removevlan);
703 rx_firmware_statistics->replacevlan =
704 in_be32(&p_rx_fw_statistics_pram->replacevlan);
705 rx_firmware_statistics->insertvlan =
706 in_be32(&p_rx_fw_statistics_pram->insertvlan);
707 }
708
709 /* Hardware only if user handed pointer and driver actually
710 gathers hardware statistics */
711 if (hardware_statistics && (in_be32(&uf_regs->upsmr) & UPSMR_HSE)) {
712 hardware_statistics->tx64 = in_be32(&ug_regs->tx64);
713 hardware_statistics->tx127 = in_be32(&ug_regs->tx127);
714 hardware_statistics->tx255 = in_be32(&ug_regs->tx255);
715 hardware_statistics->rx64 = in_be32(&ug_regs->rx64);
716 hardware_statistics->rx127 = in_be32(&ug_regs->rx127);
717 hardware_statistics->rx255 = in_be32(&ug_regs->rx255);
718 hardware_statistics->txok = in_be32(&ug_regs->txok);
719 hardware_statistics->txcf = in_be16(&ug_regs->txcf);
720 hardware_statistics->tmca = in_be32(&ug_regs->tmca);
721 hardware_statistics->tbca = in_be32(&ug_regs->tbca);
722 hardware_statistics->rxfok = in_be32(&ug_regs->rxfok);
723 hardware_statistics->rxbok = in_be32(&ug_regs->rxbok);
724 hardware_statistics->rbyt = in_be32(&ug_regs->rbyt);
725 hardware_statistics->rmca = in_be32(&ug_regs->rmca);
726 hardware_statistics->rbca = in_be32(&ug_regs->rbca);
727 }
728}
729
730static void dump_bds(ucc_geth_private_t *ugeth)
731{
732 int i;
733 int length;
734
735 for (i = 0; i < ugeth->ug_info->numQueuesTx; i++) {
736 if (ugeth->p_tx_bd_ring[i]) {
737 length =
738 (ugeth->ug_info->bdRingLenTx[i] *
739 UCC_GETH_SIZE_OF_BD);
740 ugeth_info("TX BDs[%d]", i);
741 mem_disp(ugeth->p_tx_bd_ring[i], length);
742 }
743 }
744 for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) {
745 if (ugeth->p_rx_bd_ring[i]) {
746 length =
747 (ugeth->ug_info->bdRingLenRx[i] *
748 UCC_GETH_SIZE_OF_BD);
749 ugeth_info("RX BDs[%d]", i);
750 mem_disp(ugeth->p_rx_bd_ring[i], length);
751 }
752 }
753}
754
755static void dump_regs(ucc_geth_private_t *ugeth)
756{
757 int i;
758
759 ugeth_info("UCC%d Geth registers:", ugeth->ug_info->uf_info.ucc_num);
760 ugeth_info("Base address: 0x%08x", (u32) ugeth->ug_regs);
761
762 ugeth_info("maccfg1 : addr - 0x%08x, val - 0x%08x",
763 (u32) & ugeth->ug_regs->maccfg1,
764 in_be32(&ugeth->ug_regs->maccfg1));
765 ugeth_info("maccfg2 : addr - 0x%08x, val - 0x%08x",
766 (u32) & ugeth->ug_regs->maccfg2,
767 in_be32(&ugeth->ug_regs->maccfg2));
768 ugeth_info("ipgifg : addr - 0x%08x, val - 0x%08x",
769 (u32) & ugeth->ug_regs->ipgifg,
770 in_be32(&ugeth->ug_regs->ipgifg));
771 ugeth_info("hafdup : addr - 0x%08x, val - 0x%08x",
772 (u32) & ugeth->ug_regs->hafdup,
773 in_be32(&ugeth->ug_regs->hafdup));
774 ugeth_info("miimcfg : addr - 0x%08x, val - 0x%08x",
775 (u32) & ugeth->ug_regs->miimng.miimcfg,
776 in_be32(&ugeth->ug_regs->miimng.miimcfg));
777 ugeth_info("miimcom : addr - 0x%08x, val - 0x%08x",
778 (u32) & ugeth->ug_regs->miimng.miimcom,
779 in_be32(&ugeth->ug_regs->miimng.miimcom));
780 ugeth_info("miimadd : addr - 0x%08x, val - 0x%08x",
781 (u32) & ugeth->ug_regs->miimng.miimadd,
782 in_be32(&ugeth->ug_regs->miimng.miimadd));
783 ugeth_info("miimcon : addr - 0x%08x, val - 0x%08x",
784 (u32) & ugeth->ug_regs->miimng.miimcon,
785 in_be32(&ugeth->ug_regs->miimng.miimcon));
786 ugeth_info("miimstat : addr - 0x%08x, val - 0x%08x",
787 (u32) & ugeth->ug_regs->miimng.miimstat,
788 in_be32(&ugeth->ug_regs->miimng.miimstat));
789 ugeth_info("miimmind : addr - 0x%08x, val - 0x%08x",
790 (u32) & ugeth->ug_regs->miimng.miimind,
791 in_be32(&ugeth->ug_regs->miimng.miimind));
792 ugeth_info("ifctl : addr - 0x%08x, val - 0x%08x",
793 (u32) & ugeth->ug_regs->ifctl,
794 in_be32(&ugeth->ug_regs->ifctl));
795 ugeth_info("ifstat : addr - 0x%08x, val - 0x%08x",
796 (u32) & ugeth->ug_regs->ifstat,
797 in_be32(&ugeth->ug_regs->ifstat));
798 ugeth_info("macstnaddr1: addr - 0x%08x, val - 0x%08x",
799 (u32) & ugeth->ug_regs->macstnaddr1,
800 in_be32(&ugeth->ug_regs->macstnaddr1));
801 ugeth_info("macstnaddr2: addr - 0x%08x, val - 0x%08x",
802 (u32) & ugeth->ug_regs->macstnaddr2,
803 in_be32(&ugeth->ug_regs->macstnaddr2));
804 ugeth_info("uempr : addr - 0x%08x, val - 0x%08x",
805 (u32) & ugeth->ug_regs->uempr,
806 in_be32(&ugeth->ug_regs->uempr));
807 ugeth_info("utbipar : addr - 0x%08x, val - 0x%08x",
808 (u32) & ugeth->ug_regs->utbipar,
809 in_be32(&ugeth->ug_regs->utbipar));
810 ugeth_info("uescr : addr - 0x%08x, val - 0x%04x",
811 (u32) & ugeth->ug_regs->uescr,
812 in_be16(&ugeth->ug_regs->uescr));
813 ugeth_info("tx64 : addr - 0x%08x, val - 0x%08x",
814 (u32) & ugeth->ug_regs->tx64,
815 in_be32(&ugeth->ug_regs->tx64));
816 ugeth_info("tx127 : addr - 0x%08x, val - 0x%08x",
817 (u32) & ugeth->ug_regs->tx127,
818 in_be32(&ugeth->ug_regs->tx127));
819 ugeth_info("tx255 : addr - 0x%08x, val - 0x%08x",
820 (u32) & ugeth->ug_regs->tx255,
821 in_be32(&ugeth->ug_regs->tx255));
822 ugeth_info("rx64 : addr - 0x%08x, val - 0x%08x",
823 (u32) & ugeth->ug_regs->rx64,
824 in_be32(&ugeth->ug_regs->rx64));
825 ugeth_info("rx127 : addr - 0x%08x, val - 0x%08x",
826 (u32) & ugeth->ug_regs->rx127,
827 in_be32(&ugeth->ug_regs->rx127));
828 ugeth_info("rx255 : addr - 0x%08x, val - 0x%08x",
829 (u32) & ugeth->ug_regs->rx255,
830 in_be32(&ugeth->ug_regs->rx255));
831 ugeth_info("txok : addr - 0x%08x, val - 0x%08x",
832 (u32) & ugeth->ug_regs->txok,
833 in_be32(&ugeth->ug_regs->txok));
834 ugeth_info("txcf : addr - 0x%08x, val - 0x%04x",
835 (u32) & ugeth->ug_regs->txcf,
836 in_be16(&ugeth->ug_regs->txcf));
837 ugeth_info("tmca : addr - 0x%08x, val - 0x%08x",
838 (u32) & ugeth->ug_regs->tmca,
839 in_be32(&ugeth->ug_regs->tmca));
840 ugeth_info("tbca : addr - 0x%08x, val - 0x%08x",
841 (u32) & ugeth->ug_regs->tbca,
842 in_be32(&ugeth->ug_regs->tbca));
843 ugeth_info("rxfok : addr - 0x%08x, val - 0x%08x",
844 (u32) & ugeth->ug_regs->rxfok,
845 in_be32(&ugeth->ug_regs->rxfok));
846 ugeth_info("rxbok : addr - 0x%08x, val - 0x%08x",
847 (u32) & ugeth->ug_regs->rxbok,
848 in_be32(&ugeth->ug_regs->rxbok));
849 ugeth_info("rbyt : addr - 0x%08x, val - 0x%08x",
850 (u32) & ugeth->ug_regs->rbyt,
851 in_be32(&ugeth->ug_regs->rbyt));
852 ugeth_info("rmca : addr - 0x%08x, val - 0x%08x",
853 (u32) & ugeth->ug_regs->rmca,
854 in_be32(&ugeth->ug_regs->rmca));
855 ugeth_info("rbca : addr - 0x%08x, val - 0x%08x",
856 (u32) & ugeth->ug_regs->rbca,
857 in_be32(&ugeth->ug_regs->rbca));
858 ugeth_info("scar : addr - 0x%08x, val - 0x%08x",
859 (u32) & ugeth->ug_regs->scar,
860 in_be32(&ugeth->ug_regs->scar));
861 ugeth_info("scam : addr - 0x%08x, val - 0x%08x",
862 (u32) & ugeth->ug_regs->scam,
863 in_be32(&ugeth->ug_regs->scam));
864
865 if (ugeth->p_thread_data_tx) {
866 int numThreadsTxNumerical;
867 switch (ugeth->ug_info->numThreadsTx) {
868 case UCC_GETH_NUM_OF_THREADS_1:
869 numThreadsTxNumerical = 1;
870 break;
871 case UCC_GETH_NUM_OF_THREADS_2:
872 numThreadsTxNumerical = 2;
873 break;
874 case UCC_GETH_NUM_OF_THREADS_4:
875 numThreadsTxNumerical = 4;
876 break;
877 case UCC_GETH_NUM_OF_THREADS_6:
878 numThreadsTxNumerical = 6;
879 break;
880 case UCC_GETH_NUM_OF_THREADS_8:
881 numThreadsTxNumerical = 8;
882 break;
883 default:
884 numThreadsTxNumerical = 0;
885 break;
886 }
887
888 ugeth_info("Thread data TXs:");
889 ugeth_info("Base address: 0x%08x",
890 (u32) ugeth->p_thread_data_tx);
891 for (i = 0; i < numThreadsTxNumerical; i++) {
892 ugeth_info("Thread data TX[%d]:", i);
893 ugeth_info("Base address: 0x%08x",
894 (u32) & ugeth->p_thread_data_tx[i]);
895 mem_disp((u8 *) & ugeth->p_thread_data_tx[i],
896 sizeof(ucc_geth_thread_data_tx_t));
897 }
898 }
899 if (ugeth->p_thread_data_rx) {
900 int numThreadsRxNumerical;
901 switch (ugeth->ug_info->numThreadsRx) {
902 case UCC_GETH_NUM_OF_THREADS_1:
903 numThreadsRxNumerical = 1;
904 break;
905 case UCC_GETH_NUM_OF_THREADS_2:
906 numThreadsRxNumerical = 2;
907 break;
908 case UCC_GETH_NUM_OF_THREADS_4:
909 numThreadsRxNumerical = 4;
910 break;
911 case UCC_GETH_NUM_OF_THREADS_6:
912 numThreadsRxNumerical = 6;
913 break;
914 case UCC_GETH_NUM_OF_THREADS_8:
915 numThreadsRxNumerical = 8;
916 break;
917 default:
918 numThreadsRxNumerical = 0;
919 break;
920 }
921
922 ugeth_info("Thread data RX:");
923 ugeth_info("Base address: 0x%08x",
924 (u32) ugeth->p_thread_data_rx);
925 for (i = 0; i < numThreadsRxNumerical; i++) {
926 ugeth_info("Thread data RX[%d]:", i);
927 ugeth_info("Base address: 0x%08x",
928 (u32) & ugeth->p_thread_data_rx[i]);
929 mem_disp((u8 *) & ugeth->p_thread_data_rx[i],
930 sizeof(ucc_geth_thread_data_rx_t));
931 }
932 }
933 if (ugeth->p_exf_glbl_param) {
934 ugeth_info("EXF global param:");
935 ugeth_info("Base address: 0x%08x",
936 (u32) ugeth->p_exf_glbl_param);
937 mem_disp((u8 *) ugeth->p_exf_glbl_param,
938 sizeof(*ugeth->p_exf_glbl_param));
939 }
940 if (ugeth->p_tx_glbl_pram) {
941 ugeth_info("TX global param:");
942 ugeth_info("Base address: 0x%08x", (u32) ugeth->p_tx_glbl_pram);
943 ugeth_info("temoder : addr - 0x%08x, val - 0x%04x",
944 (u32) & ugeth->p_tx_glbl_pram->temoder,
945 in_be16(&ugeth->p_tx_glbl_pram->temoder));
946 ugeth_info("sqptr : addr - 0x%08x, val - 0x%08x",
947 (u32) & ugeth->p_tx_glbl_pram->sqptr,
948 in_be32(&ugeth->p_tx_glbl_pram->sqptr));
949 ugeth_info("schedulerbasepointer: addr - 0x%08x, val - 0x%08x",
950 (u32) & ugeth->p_tx_glbl_pram->schedulerbasepointer,
951 in_be32(&ugeth->p_tx_glbl_pram->
952 schedulerbasepointer));
953 ugeth_info("txrmonbaseptr: addr - 0x%08x, val - 0x%08x",
954 (u32) & ugeth->p_tx_glbl_pram->txrmonbaseptr,
955 in_be32(&ugeth->p_tx_glbl_pram->txrmonbaseptr));
956 ugeth_info("tstate : addr - 0x%08x, val - 0x%08x",
957 (u32) & ugeth->p_tx_glbl_pram->tstate,
958 in_be32(&ugeth->p_tx_glbl_pram->tstate));
959 ugeth_info("iphoffset[0] : addr - 0x%08x, val - 0x%02x",
960 (u32) & ugeth->p_tx_glbl_pram->iphoffset[0],
961 ugeth->p_tx_glbl_pram->iphoffset[0]);
962 ugeth_info("iphoffset[1] : addr - 0x%08x, val - 0x%02x",
963 (u32) & ugeth->p_tx_glbl_pram->iphoffset[1],
964 ugeth->p_tx_glbl_pram->iphoffset[1]);
965 ugeth_info("iphoffset[2] : addr - 0x%08x, val - 0x%02x",
966 (u32) & ugeth->p_tx_glbl_pram->iphoffset[2],
967 ugeth->p_tx_glbl_pram->iphoffset[2]);
968 ugeth_info("iphoffset[3] : addr - 0x%08x, val - 0x%02x",
969 (u32) & ugeth->p_tx_glbl_pram->iphoffset[3],
970 ugeth->p_tx_glbl_pram->iphoffset[3]);
971 ugeth_info("iphoffset[4] : addr - 0x%08x, val - 0x%02x",
972 (u32) & ugeth->p_tx_glbl_pram->iphoffset[4],
973 ugeth->p_tx_glbl_pram->iphoffset[4]);
974 ugeth_info("iphoffset[5] : addr - 0x%08x, val - 0x%02x",
975 (u32) & ugeth->p_tx_glbl_pram->iphoffset[5],
976 ugeth->p_tx_glbl_pram->iphoffset[5]);
977 ugeth_info("iphoffset[6] : addr - 0x%08x, val - 0x%02x",
978 (u32) & ugeth->p_tx_glbl_pram->iphoffset[6],
979 ugeth->p_tx_glbl_pram->iphoffset[6]);
980 ugeth_info("iphoffset[7] : addr - 0x%08x, val - 0x%02x",
981 (u32) & ugeth->p_tx_glbl_pram->iphoffset[7],
982 ugeth->p_tx_glbl_pram->iphoffset[7]);
983 ugeth_info("vtagtable[0] : addr - 0x%08x, val - 0x%08x",
984 (u32) & ugeth->p_tx_glbl_pram->vtagtable[0],
985 in_be32(&ugeth->p_tx_glbl_pram->vtagtable[0]));
986 ugeth_info("vtagtable[1] : addr - 0x%08x, val - 0x%08x",
987 (u32) & ugeth->p_tx_glbl_pram->vtagtable[1],
988 in_be32(&ugeth->p_tx_glbl_pram->vtagtable[1]));
989 ugeth_info("vtagtable[2] : addr - 0x%08x, val - 0x%08x",
990 (u32) & ugeth->p_tx_glbl_pram->vtagtable[2],
991 in_be32(&ugeth->p_tx_glbl_pram->vtagtable[2]));
992 ugeth_info("vtagtable[3] : addr - 0x%08x, val - 0x%08x",
993 (u32) & ugeth->p_tx_glbl_pram->vtagtable[3],
994 in_be32(&ugeth->p_tx_glbl_pram->vtagtable[3]));
995 ugeth_info("vtagtable[4] : addr - 0x%08x, val - 0x%08x",
996 (u32) & ugeth->p_tx_glbl_pram->vtagtable[4],
997 in_be32(&ugeth->p_tx_glbl_pram->vtagtable[4]));
998 ugeth_info("vtagtable[5] : addr - 0x%08x, val - 0x%08x",
999 (u32) & ugeth->p_tx_glbl_pram->vtagtable[5],
1000 in_be32(&ugeth->p_tx_glbl_pram->vtagtable[5]));
1001 ugeth_info("vtagtable[6] : addr - 0x%08x, val - 0x%08x",
1002 (u32) & ugeth->p_tx_glbl_pram->vtagtable[6],
1003 in_be32(&ugeth->p_tx_glbl_pram->vtagtable[6]));
1004 ugeth_info("vtagtable[7] : addr - 0x%08x, val - 0x%08x",
1005 (u32) & ugeth->p_tx_glbl_pram->vtagtable[7],
1006 in_be32(&ugeth->p_tx_glbl_pram->vtagtable[7]));
1007 ugeth_info("tqptr : addr - 0x%08x, val - 0x%08x",
1008 (u32) & ugeth->p_tx_glbl_pram->tqptr,
1009 in_be32(&ugeth->p_tx_glbl_pram->tqptr));
1010 }
1011 if (ugeth->p_rx_glbl_pram) {
1012 ugeth_info("RX global param:");
1013 ugeth_info("Base address: 0x%08x", (u32) ugeth->p_rx_glbl_pram);
1014 ugeth_info("remoder : addr - 0x%08x, val - 0x%08x",
1015 (u32) & ugeth->p_rx_glbl_pram->remoder,
1016 in_be32(&ugeth->p_rx_glbl_pram->remoder));
1017 ugeth_info("rqptr : addr - 0x%08x, val - 0x%08x",
1018 (u32) & ugeth->p_rx_glbl_pram->rqptr,
1019 in_be32(&ugeth->p_rx_glbl_pram->rqptr));
1020 ugeth_info("typeorlen : addr - 0x%08x, val - 0x%04x",
1021 (u32) & ugeth->p_rx_glbl_pram->typeorlen,
1022 in_be16(&ugeth->p_rx_glbl_pram->typeorlen));
1023 ugeth_info("rxgstpack : addr - 0x%08x, val - 0x%02x",
1024 (u32) & ugeth->p_rx_glbl_pram->rxgstpack,
1025 ugeth->p_rx_glbl_pram->rxgstpack);
1026 ugeth_info("rxrmonbaseptr : addr - 0x%08x, val - 0x%08x",
1027 (u32) & ugeth->p_rx_glbl_pram->rxrmonbaseptr,
1028 in_be32(&ugeth->p_rx_glbl_pram->rxrmonbaseptr));
1029 ugeth_info("intcoalescingptr: addr - 0x%08x, val - 0x%08x",
1030 (u32) & ugeth->p_rx_glbl_pram->intcoalescingptr,
1031 in_be32(&ugeth->p_rx_glbl_pram->intcoalescingptr));
1032 ugeth_info("rstate : addr - 0x%08x, val - 0x%02x",
1033 (u32) & ugeth->p_rx_glbl_pram->rstate,
1034 ugeth->p_rx_glbl_pram->rstate);
1035 ugeth_info("mrblr : addr - 0x%08x, val - 0x%04x",
1036 (u32) & ugeth->p_rx_glbl_pram->mrblr,
1037 in_be16(&ugeth->p_rx_glbl_pram->mrblr));
1038 ugeth_info("rbdqptr : addr - 0x%08x, val - 0x%08x",
1039 (u32) & ugeth->p_rx_glbl_pram->rbdqptr,
1040 in_be32(&ugeth->p_rx_glbl_pram->rbdqptr));
1041 ugeth_info("mflr : addr - 0x%08x, val - 0x%04x",
1042 (u32) & ugeth->p_rx_glbl_pram->mflr,
1043 in_be16(&ugeth->p_rx_glbl_pram->mflr));
1044 ugeth_info("minflr : addr - 0x%08x, val - 0x%04x",
1045 (u32) & ugeth->p_rx_glbl_pram->minflr,
1046 in_be16(&ugeth->p_rx_glbl_pram->minflr));
1047 ugeth_info("maxd1 : addr - 0x%08x, val - 0x%04x",
1048 (u32) & ugeth->p_rx_glbl_pram->maxd1,
1049 in_be16(&ugeth->p_rx_glbl_pram->maxd1));
1050 ugeth_info("maxd2 : addr - 0x%08x, val - 0x%04x",
1051 (u32) & ugeth->p_rx_glbl_pram->maxd2,
1052 in_be16(&ugeth->p_rx_glbl_pram->maxd2));
1053 ugeth_info("ecamptr : addr - 0x%08x, val - 0x%08x",
1054 (u32) & ugeth->p_rx_glbl_pram->ecamptr,
1055 in_be32(&ugeth->p_rx_glbl_pram->ecamptr));
1056 ugeth_info("l2qt : addr - 0x%08x, val - 0x%08x",
1057 (u32) & ugeth->p_rx_glbl_pram->l2qt,
1058 in_be32(&ugeth->p_rx_glbl_pram->l2qt));
1059 ugeth_info("l3qt[0] : addr - 0x%08x, val - 0x%08x",
1060 (u32) & ugeth->p_rx_glbl_pram->l3qt[0],
1061 in_be32(&ugeth->p_rx_glbl_pram->l3qt[0]));
1062 ugeth_info("l3qt[1] : addr - 0x%08x, val - 0x%08x",
1063 (u32) & ugeth->p_rx_glbl_pram->l3qt[1],
1064 in_be32(&ugeth->p_rx_glbl_pram->l3qt[1]));
1065 ugeth_info("l3qt[2] : addr - 0x%08x, val - 0x%08x",
1066 (u32) & ugeth->p_rx_glbl_pram->l3qt[2],
1067 in_be32(&ugeth->p_rx_glbl_pram->l3qt[2]));
1068 ugeth_info("l3qt[3] : addr - 0x%08x, val - 0x%08x",
1069 (u32) & ugeth->p_rx_glbl_pram->l3qt[3],
1070 in_be32(&ugeth->p_rx_glbl_pram->l3qt[3]));
1071 ugeth_info("l3qt[4] : addr - 0x%08x, val - 0x%08x",
1072 (u32) & ugeth->p_rx_glbl_pram->l3qt[4],
1073 in_be32(&ugeth->p_rx_glbl_pram->l3qt[4]));
1074 ugeth_info("l3qt[5] : addr - 0x%08x, val - 0x%08x",
1075 (u32) & ugeth->p_rx_glbl_pram->l3qt[5],
1076 in_be32(&ugeth->p_rx_glbl_pram->l3qt[5]));
1077 ugeth_info("l3qt[6] : addr - 0x%08x, val - 0x%08x",
1078 (u32) & ugeth->p_rx_glbl_pram->l3qt[6],
1079 in_be32(&ugeth->p_rx_glbl_pram->l3qt[6]));
1080 ugeth_info("l3qt[7] : addr - 0x%08x, val - 0x%08x",
1081 (u32) & ugeth->p_rx_glbl_pram->l3qt[7],
1082 in_be32(&ugeth->p_rx_glbl_pram->l3qt[7]));
1083 ugeth_info("vlantype : addr - 0x%08x, val - 0x%04x",
1084 (u32) & ugeth->p_rx_glbl_pram->vlantype,
1085 in_be16(&ugeth->p_rx_glbl_pram->vlantype));
1086 ugeth_info("vlantci : addr - 0x%08x, val - 0x%04x",
1087 (u32) & ugeth->p_rx_glbl_pram->vlantci,
1088 in_be16(&ugeth->p_rx_glbl_pram->vlantci));
1089 for (i = 0; i < 64; i++)
1090 ugeth_info
1091 ("addressfiltering[%d]: addr - 0x%08x, val - 0x%02x",
1092 i,
1093 (u32) & ugeth->p_rx_glbl_pram->addressfiltering[i],
1094 ugeth->p_rx_glbl_pram->addressfiltering[i]);
1095 ugeth_info("exfGlobalParam : addr - 0x%08x, val - 0x%08x",
1096 (u32) & ugeth->p_rx_glbl_pram->exfGlobalParam,
1097 in_be32(&ugeth->p_rx_glbl_pram->exfGlobalParam));
1098 }
1099 if (ugeth->p_send_q_mem_reg) {
1100 ugeth_info("Send Q memory registers:");
1101 ugeth_info("Base address: 0x%08x",
1102 (u32) ugeth->p_send_q_mem_reg);
1103 for (i = 0; i < ugeth->ug_info->numQueuesTx; i++) {
1104 ugeth_info("SQQD[%d]:", i);
1105 ugeth_info("Base address: 0x%08x",
1106 (u32) & ugeth->p_send_q_mem_reg->sqqd[i]);
1107 mem_disp((u8 *) & ugeth->p_send_q_mem_reg->sqqd[i],
1108 sizeof(ucc_geth_send_queue_qd_t));
1109 }
1110 }
1111 if (ugeth->p_scheduler) {
1112 ugeth_info("Scheduler:");
1113 ugeth_info("Base address: 0x%08x", (u32) ugeth->p_scheduler);
1114 mem_disp((u8 *) ugeth->p_scheduler,
1115 sizeof(*ugeth->p_scheduler));
1116 }
1117 if (ugeth->p_tx_fw_statistics_pram) {
1118 ugeth_info("TX FW statistics pram:");
1119 ugeth_info("Base address: 0x%08x",
1120 (u32) ugeth->p_tx_fw_statistics_pram);
1121 mem_disp((u8 *) ugeth->p_tx_fw_statistics_pram,
1122 sizeof(*ugeth->p_tx_fw_statistics_pram));
1123 }
1124 if (ugeth->p_rx_fw_statistics_pram) {
1125 ugeth_info("RX FW statistics pram:");
1126 ugeth_info("Base address: 0x%08x",
1127 (u32) ugeth->p_rx_fw_statistics_pram);
1128 mem_disp((u8 *) ugeth->p_rx_fw_statistics_pram,
1129 sizeof(*ugeth->p_rx_fw_statistics_pram));
1130 }
1131 if (ugeth->p_rx_irq_coalescing_tbl) {
1132 ugeth_info("RX IRQ coalescing tables:");
1133 ugeth_info("Base address: 0x%08x",
1134 (u32) ugeth->p_rx_irq_coalescing_tbl);
1135 for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) {
1136 ugeth_info("RX IRQ coalescing table entry[%d]:", i);
1137 ugeth_info("Base address: 0x%08x",
1138 (u32) & ugeth->p_rx_irq_coalescing_tbl->
1139 coalescingentry[i]);
1140 ugeth_info
1141 ("interruptcoalescingmaxvalue: addr - 0x%08x, val - 0x%08x",
1142 (u32) & ugeth->p_rx_irq_coalescing_tbl->
1143 coalescingentry[i].interruptcoalescingmaxvalue,
1144 in_be32(&ugeth->p_rx_irq_coalescing_tbl->
1145 coalescingentry[i].
1146 interruptcoalescingmaxvalue));
1147 ugeth_info
1148 ("interruptcoalescingcounter : addr - 0x%08x, val - 0x%08x",
1149 (u32) & ugeth->p_rx_irq_coalescing_tbl->
1150 coalescingentry[i].interruptcoalescingcounter,
1151 in_be32(&ugeth->p_rx_irq_coalescing_tbl->
1152 coalescingentry[i].
1153 interruptcoalescingcounter));
1154 }
1155 }
1156 if (ugeth->p_rx_bd_qs_tbl) {
1157 ugeth_info("RX BD QS tables:");
1158 ugeth_info("Base address: 0x%08x", (u32) ugeth->p_rx_bd_qs_tbl);
1159 for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) {
1160 ugeth_info("RX BD QS table[%d]:", i);
1161 ugeth_info("Base address: 0x%08x",
1162 (u32) & ugeth->p_rx_bd_qs_tbl[i]);
1163 ugeth_info
1164 ("bdbaseptr : addr - 0x%08x, val - 0x%08x",
1165 (u32) & ugeth->p_rx_bd_qs_tbl[i].bdbaseptr,
1166 in_be32(&ugeth->p_rx_bd_qs_tbl[i].bdbaseptr));
1167 ugeth_info
1168 ("bdptr : addr - 0x%08x, val - 0x%08x",
1169 (u32) & ugeth->p_rx_bd_qs_tbl[i].bdptr,
1170 in_be32(&ugeth->p_rx_bd_qs_tbl[i].bdptr));
1171 ugeth_info
1172 ("externalbdbaseptr: addr - 0x%08x, val - 0x%08x",
1173 (u32) & ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr,
1174 in_be32(&ugeth->p_rx_bd_qs_tbl[i].
1175 externalbdbaseptr));
1176 ugeth_info
1177 ("externalbdptr : addr - 0x%08x, val - 0x%08x",
1178 (u32) & ugeth->p_rx_bd_qs_tbl[i].externalbdptr,
1179 in_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdptr));
1180 ugeth_info("ucode RX Prefetched BDs:");
1181 ugeth_info("Base address: 0x%08x",
1182 (u32)
1183 qe_muram_addr(in_be32
1184 (&ugeth->p_rx_bd_qs_tbl[i].
1185 bdbaseptr)));
1186 mem_disp((u8 *)
1187 qe_muram_addr(in_be32
1188 (&ugeth->p_rx_bd_qs_tbl[i].
1189 bdbaseptr)),
1190 sizeof(ucc_geth_rx_prefetched_bds_t));
1191 }
1192 }
1193 if (ugeth->p_init_enet_param_shadow) {
1194 int size;
1195 ugeth_info("Init enet param shadow:");
1196 ugeth_info("Base address: 0x%08x",
1197 (u32) ugeth->p_init_enet_param_shadow);
1198 mem_disp((u8 *) ugeth->p_init_enet_param_shadow,
1199 sizeof(*ugeth->p_init_enet_param_shadow));
1200
1201 size = sizeof(ucc_geth_thread_rx_pram_t);
1202 if (ugeth->ug_info->rxExtendedFiltering) {
1203 size +=
1204 THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING;
1205 if (ugeth->ug_info->largestexternallookupkeysize ==
1206 QE_FLTR_TABLE_LOOKUP_KEY_SIZE_8_BYTES)
1207 size +=
1208 THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_8;
1209 if (ugeth->ug_info->largestexternallookupkeysize ==
1210 QE_FLTR_TABLE_LOOKUP_KEY_SIZE_16_BYTES)
1211 size +=
1212 THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_16;
1213 }
1214
1215 dump_init_enet_entries(ugeth,
1216 &(ugeth->p_init_enet_param_shadow->
1217 txthread[0]),
1218 ENET_INIT_PARAM_MAX_ENTRIES_TX,
1219 sizeof(ucc_geth_thread_tx_pram_t),
1220 ugeth->ug_info->riscTx, 0);
1221 dump_init_enet_entries(ugeth,
1222 &(ugeth->p_init_enet_param_shadow->
1223 rxthread[0]),
1224 ENET_INIT_PARAM_MAX_ENTRIES_RX, size,
1225 ugeth->ug_info->riscRx, 1);
1226 }
1227}
1228#endif /* DEBUG */
1229
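/* Program UPSMR, MACCFG1 and MACCFG2 with their driver default initialization values. */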
1230static void init_default_reg_vals(volatile u32 *upsmr_register,
1231 volatile u32 *maccfg1_register,
1232 volatile u32 *maccfg2_register)
1233{
1234 out_be32(upsmr_register, UCC_GETH_UPSMR_INIT);
1235 out_be32(maccfg1_register, UCC_GETH_MACCFG1_INIT);
1236 out_be32(maccfg2_register, UCC_GETH_MACCFG2_INIT);
1237}
1238
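/* Validate the half-duplex parameters and program them into the HAFDUP register. */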
1239static int init_half_duplex_params(int alt_beb,
1240 int back_pressure_no_backoff,
1241 int no_backoff,
1242 int excess_defer,
1243 u8 alt_beb_truncation,
1244 u8 max_retransmissions,
1245 u8 collision_window,
1246 volatile u32 *hafdup_register)
1247{
1248 u32 value = 0;
1249
1250 if ((alt_beb_truncation > HALFDUP_ALT_BEB_TRUNCATION_MAX) ||
1251 (max_retransmissions > HALFDUP_MAX_RETRANSMISSION_MAX) ||
1252 (collision_window > HALFDUP_COLLISION_WINDOW_MAX))
1253 return -EINVAL;
1254
1255 value = (u32) (alt_beb_truncation << HALFDUP_ALT_BEB_TRUNCATION_SHIFT);
1256
1257 if (alt_beb)
1258 value |= HALFDUP_ALT_BEB;
1259 if (back_pressure_no_backoff)
1260 value |= HALFDUP_BACK_PRESSURE_NO_BACKOFF;
1261 if (no_backoff)
1262 value |= HALFDUP_NO_BACKOFF;
1263 if (excess_defer)
1264 value |= HALFDUP_EXCESSIVE_DEFER;
1265
1266 value |= (max_retransmissions << HALFDUP_MAX_RETRANSMISSION_SHIFT);
1267
1268 value |= collision_window;
1269
1270 out_be32(hafdup_register, value);
1271 return 0;
1272}
1273
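/* Validate the inter-frame gap parameters and program them into the IPGIFG register. */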
1274static int init_inter_frame_gap_params(u8 non_btb_cs_ipg,
1275 u8 non_btb_ipg,
1276 u8 min_ifg,
1277 u8 btb_ipg,
1278 volatile u32 *ipgifg_register)
1279{
1280 u32 value = 0;
1281
1282 /* Non-Back-to-back IPG part 1 should be <= Non-Back-to-back
1283 IPG part 2 */
1284 if (non_btb_cs_ipg > non_btb_ipg)
1285 return -EINVAL;
1286
1287 if ((non_btb_cs_ipg > IPGIFG_NON_BACK_TO_BACK_IFG_PART1_MAX) ||
1288 (non_btb_ipg > IPGIFG_NON_BACK_TO_BACK_IFG_PART2_MAX) ||
1289 /*(min_ifg > IPGIFG_MINIMUM_IFG_ENFORCEMENT_MAX) || */
1290 (btb_ipg > IPGIFG_BACK_TO_BACK_IFG_MAX))
1291 return -EINVAL;
1292
1293 value |=
1294 ((non_btb_cs_ipg << IPGIFG_NON_BACK_TO_BACK_IFG_PART1_SHIFT) &
1295 IPGIFG_NBTB_CS_IPG_MASK);
1296 value |=
1297 ((non_btb_ipg << IPGIFG_NON_BACK_TO_BACK_IFG_PART2_SHIFT) &
1298 IPGIFG_NBTB_IPG_MASK);
1299 value |=
1300 ((min_ifg << IPGIFG_MINIMUM_IFG_ENFORCEMENT_SHIFT) &
1301 IPGIFG_MIN_IFG_MASK);
1302 value |= (btb_ipg & IPGIFG_BTB_IPG_MASK);
1303
1304 out_be32(ipgifg_register, value);
1305 return 0;
1306}
1307
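/* Program the pause period and extension field into UEMPR, the automatic flow
 * control mode into UPSMR, and the Rx/Tx flow control enables into MACCFG1. */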
1308static int init_flow_control_params(u32 automatic_flow_control_mode,
1309 int rx_flow_control_enable,
1310 int tx_flow_control_enable,
1311 u16 pause_period,
1312 u16 extension_field,
1313 volatile u32 *upsmr_register,
1314 volatile u32 *uempr_register,
1315 volatile u32 *maccfg1_register)
1316{
1317 u32 value = 0;
1318
1319 /* Set UEMPR register */
1320 value = (u32) pause_period << UEMPR_PAUSE_TIME_VALUE_SHIFT;
1321 value |= (u32) extension_field << UEMPR_EXTENDED_PAUSE_TIME_VALUE_SHIFT;
1322 out_be32(uempr_register, value);
1323
1324 /* Set UPSMR register */
1325 value = in_be32(upsmr_register);
1326 value |= automatic_flow_control_mode;
1327 out_be32(upsmr_register, value);
1328
1329 value = in_be32(maccfg1_register);
1330 if (rx_flow_control_enable)
1331 value |= MACCFG1_FLOW_RX;
1332 if (tx_flow_control_enable)
1333 value |= MACCFG1_FLOW_TX;
1334 out_be32(maccfg1_register, value);
1335
1336 return 0;
1337}
1338
1339static int init_hw_statistics_gathering_mode(int enable_hardware_statistics,
1340 int auto_zero_hardware_statistics,
1341 volatile u32 *upsmr_register,
1342 volatile u16 *uescr_register)
1343{
1344 u32 upsmr_value = 0;
1345 u16 uescr_value = 0;
1346 /* Enable hardware statistics gathering if requested */
1347 if (enable_hardware_statistics) {
1348 upsmr_value = in_be32(upsmr_register);
1349 upsmr_value |= UPSMR_HSE;
1350 out_be32(upsmr_register, upsmr_value);
1351 }
1352
1353 /* Clear hardware statistics counters */
1354 uescr_value = in_be16(uescr_register);
1355 uescr_value |= UESCR_CLRCNT;
1356 /* Automatically zero hardware statistics counters on read,
1357 if requested */
1358 if (auto_zero_hardware_statistics)
1359 uescr_value |= UESCR_AUTOZ;
1360 out_be16(uescr_register, uescr_value);
1361
1362 return 0;
1363}
1364
1365static int init_firmware_statistics_gathering_mode(int
1366 enable_tx_firmware_statistics,
1367 int enable_rx_firmware_statistics,
1368 volatile u32 *tx_rmon_base_ptr,
1369 u32 tx_firmware_statistics_structure_address,
1370 volatile u32 *rx_rmon_base_ptr,
1371 u32 rx_firmware_statistics_structure_address,
1372 volatile u16 *temoder_register,
1373 volatile u32 *remoder_register)
1374{
1375	/* Note: this function does not check whether
1376	 * the parameters it receives are NULL */
1377 u16 temoder_value;
1378 u32 remoder_value;
1379
1380 if (enable_tx_firmware_statistics) {
1381 out_be32(tx_rmon_base_ptr,
1382 tx_firmware_statistics_structure_address);
1383 temoder_value = in_be16(temoder_register);
1384 temoder_value |= TEMODER_TX_RMON_STATISTICS_ENABLE;
1385 out_be16(temoder_register, temoder_value);
1386 }
1387
1388 if (enable_rx_firmware_statistics) {
1389 out_be32(rx_rmon_base_ptr,
1390 rx_firmware_statistics_structure_address);
1391 remoder_value = in_be32(remoder_register);
1392 remoder_value |= REMODER_RX_RMON_STATISTICS_ENABLE;
1393 out_be32(remoder_register, remoder_value);
1394 }
1395
1396 return 0;
1397}
1398
1399static int init_mac_station_addr_regs(u8 address_byte_0,
1400 u8 address_byte_1,
1401 u8 address_byte_2,
1402 u8 address_byte_3,
1403 u8 address_byte_4,
1404 u8 address_byte_5,
1405 volatile u32 *macstnaddr1_register,
1406 volatile u32 *macstnaddr2_register)
1407{
1408 u32 value = 0;
1409
1410 /* Example: for a station address of 0x12345678ABCD, */
1411 /* 0x12 is byte 0, 0x34 is byte 1 and so on and 0xCD is byte 5 */
1412
1413 /* MACSTNADDR1 Register: */
1414
1415 /* 0 7 8 15 */
1416 /* station address byte 5 station address byte 4 */
1417 /* 16 23 24 31 */
1418 /* station address byte 3 station address byte 2 */
1419 value |= (u32) ((address_byte_2 << 0) & 0x000000FF);
1420 value |= (u32) ((address_byte_3 << 8) & 0x0000FF00);
1421 value |= (u32) ((address_byte_4 << 16) & 0x00FF0000);
1422 value |= (u32) ((address_byte_5 << 24) & 0xFF000000);
1423
1424 out_be32(macstnaddr1_register, value);
1425
1426 /* MACSTNADDR2 Register: */
1427
1428 /* 0 7 8 15 */
1429 /* station address byte 1 station address byte 0 */
1430 /* 16 23 24 31 */
1431 /* reserved reserved */
1432 value = 0;
1433 value |= (u32) ((address_byte_0 << 16) & 0x00FF0000);
1434 value |= (u32) ((address_byte_1 << 24) & 0xFF000000);
1435
1436 out_be32(macstnaddr2_register, value);
1437
1438 return 0;
1439}
1440
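/* Set or clear the full-duplex bit in MACCFG2; a half-duplex request is rejected
 * for interfaces that are limited to full duplex. */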
1441static int init_mac_duplex_mode(int full_duplex,
1442 int limited_to_full_duplex,
1443 volatile u32 *maccfg2_register)
1444{
1445 u32 value = 0;
1446
1447 /* some interfaces must work in full duplex mode */
1448 if ((full_duplex == 0) && (limited_to_full_duplex == 1))
1449 return -EINVAL;
1450
1451 value = in_be32(maccfg2_register);
1452
1453 if (full_duplex)
1454 value |= MACCFG2_FDX;
1455 else
1456 value &= ~MACCFG2_FDX;
1457
1458 out_be32(maccfg2_register, value);
1459 return 0;
1460}
1461
1462static int init_check_frame_length_mode(int length_check,
1463 volatile u32 *maccfg2_register)
1464{
1465 u32 value = 0;
1466
1467 value = in_be32(maccfg2_register);
1468
1469 if (length_check)
1470 value |= MACCFG2_LC;
1471 else
1472 value &= ~MACCFG2_LC;
1473
1474 out_be32(maccfg2_register, value);
1475 return 0;
1476}
1477
1478static int init_preamble_length(u8 preamble_length,
1479 volatile u32 *maccfg2_register)
1480{
1481 u32 value = 0;
1482
1483 if ((preamble_length < 3) || (preamble_length > 7))
1484 return -EINVAL;
1485
1486 value = in_be32(maccfg2_register);
1487 value &= ~MACCFG2_PREL_MASK;
1488 value |= (preamble_length << MACCFG2_PREL_SHIFT);
1489 out_be32(maccfg2_register, value);
1490 return 0;
1491}
1492
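/* Optionally reset the MII management block, program the management clock divider
 * and preamble suppression, and wait for the MII bus to become free. */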
1493static int init_mii_management_configuration(int reset_mgmt,
1494 int preamble_supress,
1495 volatile u32 *miimcfg_register,
1496 volatile u32 *miimind_register)
1497{
1498	int timeout = PHY_INIT_TIMEOUT;
1499 u32 value = 0;
1500
1501 value = in_be32(miimcfg_register);
1502 if (reset_mgmt) {
1503 value |= MIIMCFG_RESET_MANAGEMENT;
1504 out_be32(miimcfg_register, value);
1505 }
1506
1507 value = 0;
1508
1509 if (preamble_supress)
1510 value |= MIIMCFG_NO_PREAMBLE;
1511
1512 value |= UCC_GETH_MIIMCFG_MNGMNT_CLC_DIV_INIT;
1513 out_be32(miimcfg_register, value);
1514
1515 /* Wait until the bus is free */
1516 while ((in_be32(miimind_register) & MIIMIND_BUSY) && timeout--)
1517 cpu_relax();
1518
1519 if (timeout <= 0) {
1520 ugeth_err("%s: The MII Bus is stuck!", __FUNCTION__);
1521 return -ETIMEDOUT;
1522 }
1523
1524 return 0;
1525}
1526
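/* Set or clear the broadcast-reject, short-frame-receive and promiscuous bits in UPSMR. */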
1527static int init_rx_parameters(int reject_broadcast,
1528 int receive_short_frames,
1529 int promiscuous, volatile u32 *upsmr_register)
1530{
1531 u32 value = 0;
1532
1533 value = in_be32(upsmr_register);
1534
1535 if (reject_broadcast)
1536 value |= UPSMR_BRO;
1537 else
1538 value &= ~UPSMR_BRO;
1539
1540 if (receive_short_frames)
1541 value |= UPSMR_RSH;
1542 else
1543 value &= ~UPSMR_RSH;
1544
1545 if (promiscuous)
1546 value |= UPSMR_PRO;
1547 else
1548 value &= ~UPSMR_PRO;
1549
1550 out_be32(upsmr_register, value);
1551
1552 return 0;
1553}
1554
1555static int init_max_rx_buff_len(u16 max_rx_buf_len,
1556 volatile u16 *mrblr_register)
1557{
1558 /* max_rx_buf_len value must be a multiple of 128 */
1559 if ((max_rx_buf_len == 0)
1560 || (max_rx_buf_len % UCC_GETH_MRBLR_ALIGNMENT))
1561 return -EINVAL;
1562
1563 out_be16(mrblr_register, max_rx_buf_len);
1564 return 0;
1565}
1566
1567static int init_min_frame_len(u16 min_frame_length,
1568 volatile u16 *minflr_register,
1569 volatile u16 *mrblr_register)
1570{
1571 u16 mrblr_value = 0;
1572
1573 mrblr_value = in_be16(mrblr_register);
1574 if (min_frame_length >= (mrblr_value - 4))
1575 return -EINVAL;
1576
1577 out_be16(minflr_register, min_frame_length);
1578 return 0;
1579}
1580
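/* Configure the MAC and UCC registers for the selected Ethernet interface mode:
 * interface mode bits in MACCFG2 and UPSMR, the TBI PHY address in UTBIPAR
 * (disabling TBI autonegotiation where applicable), plus the duplex, length
 * check and preamble length settings. */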
1581static int adjust_enet_interface(ucc_geth_private_t *ugeth)
1582{
1583 ucc_geth_info_t *ug_info;
1584 ucc_geth_t *ug_regs;
1585 ucc_fast_t *uf_regs;
1586 enet_speed_e speed;
1587 int ret_val, rpm = 0, tbi = 0, r10m = 0, rmm =
1588 0, limited_to_full_duplex = 0;
1589 u32 upsmr, maccfg2, utbipar, tbiBaseAddress;
1590 u16 value;
1591
1592 ugeth_vdbg("%s: IN", __FUNCTION__);
1593
1594 ug_info = ugeth->ug_info;
1595 ug_regs = ugeth->ug_regs;
1596 uf_regs = ugeth->uccf->uf_regs;
1597
1598 /* Analyze enet_interface according to Interface Mode Configuration
1599 table */
1600 ret_val =
1601 get_interface_details(ug_info->enet_interface, &speed, &r10m, &rmm,
1602 &rpm, &tbi, &limited_to_full_duplex);
1603 if (ret_val != 0) {
1604 ugeth_err
1605 ("%s: half duplex not supported in requested configuration.",
1606 __FUNCTION__);
1607 return ret_val;
1608 }
1609
1610 /* Set MACCFG2 */
1611 maccfg2 = in_be32(&ug_regs->maccfg2);
1612 maccfg2 &= ~MACCFG2_INTERFACE_MODE_MASK;
1613 if ((speed == ENET_SPEED_10BT) || (speed == ENET_SPEED_100BT))
1614 maccfg2 |= MACCFG2_INTERFACE_MODE_NIBBLE;
1615 else if (speed == ENET_SPEED_1000BT)
1616 maccfg2 |= MACCFG2_INTERFACE_MODE_BYTE;
1617 maccfg2 |= ug_info->padAndCrc;
1618 out_be32(&ug_regs->maccfg2, maccfg2);
1619
1620 /* Set UPSMR */
1621 upsmr = in_be32(&uf_regs->upsmr);
1622 upsmr &= ~(UPSMR_RPM | UPSMR_R10M | UPSMR_TBIM | UPSMR_RMM);
1623 if (rpm)
1624 upsmr |= UPSMR_RPM;
1625 if (r10m)
1626 upsmr |= UPSMR_R10M;
1627 if (tbi)
1628 upsmr |= UPSMR_TBIM;
1629 if (rmm)
1630 upsmr |= UPSMR_RMM;
1631 out_be32(&uf_regs->upsmr, upsmr);
1632
1633 /* Set UTBIPAR */
1634 utbipar = in_be32(&ug_regs->utbipar);
1635 utbipar &= ~UTBIPAR_PHY_ADDRESS_MASK;
1636 if (tbi)
1637 utbipar |=
1638 (ug_info->phy_address +
1639 ugeth->ug_info->uf_info.
1640 ucc_num) << UTBIPAR_PHY_ADDRESS_SHIFT;
1641 else
1642 utbipar |=
1643 (0x10 +
1644 ugeth->ug_info->uf_info.
1645 ucc_num) << UTBIPAR_PHY_ADDRESS_SHIFT;
1646 out_be32(&ug_regs->utbipar, utbipar);
1647
1648 /* Disable autonegotiation in tbi mode, because by default it
1649 comes up in autonegotiation mode. */
1650 /* Note that this depends on proper setting in utbipar register. */
1651 if (tbi) {
1652 tbiBaseAddress = in_be32(&ug_regs->utbipar);
1653 tbiBaseAddress &= UTBIPAR_PHY_ADDRESS_MASK;
1654 tbiBaseAddress >>= UTBIPAR_PHY_ADDRESS_SHIFT;
1655 value =
1656 ugeth->mii_info->mdio_read(ugeth->dev, (u8) tbiBaseAddress,
1657 ENET_TBI_MII_CR);
1658 value &= ~0x1000; /* Turn off autonegotiation */
1659 ugeth->mii_info->mdio_write(ugeth->dev, (u8) tbiBaseAddress,
1660 ENET_TBI_MII_CR, value);
1661 }
1662
1663 ret_val = init_mac_duplex_mode(1,
1664 limited_to_full_duplex,
1665 &ug_regs->maccfg2);
1666 if (ret_val != 0) {
1667 ugeth_err
1668 ("%s: half duplex not supported in requested configuration.",
1669 __FUNCTION__);
1670 return ret_val;
1671 }
1672
1673 init_check_frame_length_mode(ug_info->lengthCheckRx, &ug_regs->maccfg2);
1674
1675 ret_val = init_preamble_length(ug_info->prel, &ug_regs->maccfg2);
1676 if (ret_val != 0) {
1677 ugeth_err
1678 ("%s: Preamble length must be between 3 and 7 inclusive.",
1679 __FUNCTION__);
1680 return ret_val;
1681 }
1682
1683 return 0;
1684}
1685
1686/* Called every time the controller might need to be made
1687 * aware of new link state. The PHY code conveys this
1688 * information through variables in the ugeth structure, and this
1689 * function converts those variables into the appropriate
1690 * register values, and can bring down the device if needed.
1691 */
1692static void adjust_link(struct net_device *dev)
1693{
1694 ucc_geth_private_t *ugeth = netdev_priv(dev);
1695 ucc_geth_t *ug_regs;
1696 u32 tempval;
1697 struct ugeth_mii_info *mii_info = ugeth->mii_info;
1698
1699 ug_regs = ugeth->ug_regs;
1700
1701 if (mii_info->link) {
1702 /* Now we make sure that we can be in full duplex mode.
1703 * If not, we operate in half-duplex mode. */
1704 if (mii_info->duplex != ugeth->oldduplex) {
1705 if (!(mii_info->duplex)) {
1706 tempval = in_be32(&ug_regs->maccfg2);
1707 tempval &= ~(MACCFG2_FDX);
1708 out_be32(&ug_regs->maccfg2, tempval);
1709
1710 ugeth_info("%s: Half Duplex", dev->name);
1711 } else {
1712 tempval = in_be32(&ug_regs->maccfg2);
1713 tempval |= MACCFG2_FDX;
1714 out_be32(&ug_regs->maccfg2, tempval);
1715
1716 ugeth_info("%s: Full Duplex", dev->name);
1717 }
1718
1719 ugeth->oldduplex = mii_info->duplex;
1720 }
1721
1722 if (mii_info->speed != ugeth->oldspeed) {
1723 switch (mii_info->speed) {
1724 case 1000:
1725#ifdef CONFIG_MPC836x
1726/* FIXME: this code works around a 100 Mbps bug;
1727   remove it once the bug is fixed! */
1728 if (ugeth->ug_info->enet_interface ==
1729 ENET_1000_GMII)
1730 /* Run the commands which initialize the PHY */
1731 {
1732 tempval =
1733 (u32) mii_info->mdio_read(ugeth->
1734 dev, mii_info->mii_id, 0x1b);
1735 tempval |= 0x000f;
1736 mii_info->mdio_write(ugeth->dev,
1737 mii_info->mii_id, 0x1b,
1738 (u16) tempval);
1739 tempval =
1740 (u32) mii_info->mdio_read(ugeth->
1741 dev, mii_info->mii_id,
1742 MII_BMCR);
1743 mii_info->mdio_write(ugeth->dev,
1744 mii_info->mii_id, MII_BMCR,
1745 (u16) (tempval | BMCR_RESET));
1746 } else if (ugeth->ug_info->enet_interface ==
1747 ENET_1000_RGMII)
1748 /* Run the commands which initialize the PHY */
1749 {
1750 tempval =
1751 (u32) mii_info->mdio_read(ugeth->
1752 dev, mii_info->mii_id, 0x1b);
1753 tempval = (tempval & ~0x000f) | 0x000b;
1754 mii_info->mdio_write(ugeth->dev,
1755 mii_info->mii_id, 0x1b,
1756 (u16) tempval);
1757 tempval =
1758 (u32) mii_info->mdio_read(ugeth->
1759 dev, mii_info->mii_id,
1760 MII_BMCR);
1761 mii_info->mdio_write(ugeth->dev,
1762 mii_info->mii_id, MII_BMCR,
1763 (u16) (tempval | BMCR_RESET));
1764 }
1765 msleep(4000);
1766#endif /* CONFIG_MPC836x */
1767 adjust_enet_interface(ugeth);
1768 break;
1769 case 100:
1770 case 10:
1771#ifdef CONFIG_MPC836x
1772/* FIXME: this code works around a 100 Mbps bug;
1773   remove these lines once the bug is fixed! */
1774 ugeth->ug_info->enet_interface = ENET_100_RGMII;
1775 tempval =
1776 (u32) mii_info->mdio_read(ugeth->dev,
1777 mii_info->mii_id,
1778 0x1b);
1779 tempval = (tempval & ~0x000f) | 0x000b;
1780 mii_info->mdio_write(ugeth->dev,
1781 mii_info->mii_id, 0x1b,
1782 (u16) tempval);
1783 tempval =
1784 (u32) mii_info->mdio_read(ugeth->dev,
1785 mii_info->mii_id,
1786 MII_BMCR);
1787 mii_info->mdio_write(ugeth->dev,
1788 mii_info->mii_id, MII_BMCR,
1789 (u16) (tempval |
1790 BMCR_RESET));
1791 msleep(4000);
1792#endif /* CONFIG_MPC836x */
1793 adjust_enet_interface(ugeth);
1794 break;
1795 default:
1796 ugeth_warn
1797 ("%s: Ack! Speed (%d) is not 10/100/1000!",
1798 dev->name, mii_info->speed);
1799 break;
1800 }
1801
1802 ugeth_info("%s: Speed %dBT", dev->name,
1803 mii_info->speed);
1804
1805 ugeth->oldspeed = mii_info->speed;
1806 }
1807
1808 if (!ugeth->oldlink) {
1809 ugeth_info("%s: Link is up", dev->name);
1810 ugeth->oldlink = 1;
1811 netif_carrier_on(dev);
1812 netif_schedule(dev);
1813 }
1814 } else {
1815 if (ugeth->oldlink) {
1816 ugeth_info("%s: Link is down", dev->name);
1817 ugeth->oldlink = 0;
1818 ugeth->oldspeed = 0;
1819 ugeth->oldduplex = -1;
1820 netif_carrier_off(dev);
1821 }
1822 }
1823}
1824
1825/* Configure the PHY for dev.
1826 * Returns 0 on success, -1 on failure.
1827 */
1828static int init_phy(struct net_device *dev)
1829{
1830 ucc_geth_private_t *ugeth = netdev_priv(dev);
1831 struct phy_info *curphy;
1832 ucc_mii_mng_t *mii_regs;
1833 struct ugeth_mii_info *mii_info;
1834 int err;
1835
1836 mii_regs = &ugeth->ug_regs->miimng;
1837
1838 ugeth->oldlink = 0;
1839 ugeth->oldspeed = 0;
1840 ugeth->oldduplex = -1;
1841
1842 mii_info = kmalloc(sizeof(struct ugeth_mii_info), GFP_KERNEL);
1843
1844 if (NULL == mii_info) {
1845 ugeth_err("%s: Could not allocate mii_info", dev->name);
1846 return -ENOMEM;
1847 }
1848
1849 mii_info->mii_regs = mii_regs;
1850 mii_info->speed = SPEED_1000;
1851 mii_info->duplex = DUPLEX_FULL;
1852 mii_info->pause = 0;
1853 mii_info->link = 0;
1854
1855 mii_info->advertising = (ADVERTISED_10baseT_Half |
1856 ADVERTISED_10baseT_Full |
1857 ADVERTISED_100baseT_Half |
1858 ADVERTISED_100baseT_Full |
1859 ADVERTISED_1000baseT_Full);
1860 mii_info->autoneg = 1;
1861
1862 mii_info->mii_id = ugeth->ug_info->phy_address;
1863
1864 mii_info->dev = dev;
1865
1866 mii_info->mdio_read = &read_phy_reg;
1867 mii_info->mdio_write = &write_phy_reg;
1868
1869 ugeth->mii_info = mii_info;
1870
1871 spin_lock_irq(&ugeth->lock);
1872
1873	/* Set this UCC to be the master of the MII management */
1874 ucc_set_qe_mux_mii_mng(ugeth->ug_info->uf_info.ucc_num);
1875
1876 if (init_mii_management_configuration(1,
1877 ugeth->ug_info->
1878 miiPreambleSupress,
1879 &mii_regs->miimcfg,
1880 &mii_regs->miimind)) {
1881 ugeth_err("%s: The MII Bus is stuck!", dev->name);
1882 err = -1;
1883 goto bus_fail;
1884 }
1885
1886 spin_unlock_irq(&ugeth->lock);
1887
1888 /* get info for this PHY */
1889 curphy = get_phy_info(ugeth->mii_info);
1890
1891 if (curphy == NULL) {
1892 ugeth_err("%s: No PHY found", dev->name);
1893 err = -1;
1894 goto no_phy;
1895 }
1896
1897 mii_info->phyinfo = curphy;
1898
1899 /* Run the commands which initialize the PHY */
1900 if (curphy->init) {
1901 err = curphy->init(ugeth->mii_info);
1902 if (err)
1903 goto phy_init_fail;
1904 }
1905
1906 return 0;
1907
1908 phy_init_fail:
1909 no_phy:
1910 bus_fail:
1911 kfree(mii_info);
1912
1913 return err;
1914}
1915
1916#ifdef CONFIG_UGETH_TX_ON_DEMOND
1917static int ugeth_transmit_on_demand(ucc_geth_private_t *ugeth)
1918{
1919 ucc_fast_transmit_on_demand(ugeth->uccf);
1920
1921 return 0;
1922}
1923#endif
1924
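/* Issue the QE GRACEFUL_STOP_TX host command and busy-wait for the GRA event. */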
1925static int ugeth_graceful_stop_tx(ucc_geth_private_t *ugeth)
1926{
1927 ucc_fast_private_t *uccf;
1928 u32 cecr_subblock;
1929 u32 temp;
1930
1931 uccf = ugeth->uccf;
1932
1933 /* Mask GRACEFUL STOP TX interrupt bit and clear it */
1934 temp = in_be32(uccf->p_uccm);
1935 temp &= ~UCCE_GRA;
1936 out_be32(uccf->p_uccm, temp);
1937 out_be32(uccf->p_ucce, UCCE_GRA); /* clear by writing 1 */
1938
1939 /* Issue host command */
1940 cecr_subblock =
1941 ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);
1942 qe_issue_cmd(QE_GRACEFUL_STOP_TX, cecr_subblock,
1943 (u8) QE_CR_PROTOCOL_ETHERNET, 0);
1944
1945 /* Wait for command to complete */
1946 do {
1947 temp = in_be32(uccf->p_ucce);
1948 } while (!(temp & UCCE_GRA));
1949
1950 uccf->stopped_tx = 1;
1951
1952 return 0;
1953}
1954
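/* Issue the QE GRACEFUL_STOP_RX host command repeatedly until the acknowledge
 * bit in rxgstpack is asserted, as the spec requires. */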
1955static int ugeth_graceful_stop_rx(ucc_geth_private_t * ugeth)
1956{
1957 ucc_fast_private_t *uccf;
1958 u32 cecr_subblock;
1959 u8 temp;
1960
1961 uccf = ugeth->uccf;
1962
1963 /* Clear acknowledge bit */
1964 temp = ugeth->p_rx_glbl_pram->rxgstpack;
1965 temp &= ~GRACEFUL_STOP_ACKNOWLEDGE_RX;
1966 ugeth->p_rx_glbl_pram->rxgstpack = temp;
1967
1968 /* Keep issuing command and checking acknowledge bit until
1969 it is asserted, according to spec */
1970 do {
1971 /* Issue host command */
1972 cecr_subblock =
1973 ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.
1974 ucc_num);
1975 qe_issue_cmd(QE_GRACEFUL_STOP_RX, cecr_subblock,
1976 (u8) QE_CR_PROTOCOL_ETHERNET, 0);
1977
1978 temp = ugeth->p_rx_glbl_pram->rxgstpack;
1979 } while (!(temp & GRACEFUL_STOP_ACKNOWLEDGE_RX));
1980
1981 uccf->stopped_rx = 1;
1982
1983 return 0;
1984}
1985
1986static int ugeth_restart_tx(ucc_geth_private_t *ugeth)
1987{
1988 ucc_fast_private_t *uccf;
1989 u32 cecr_subblock;
1990
1991 uccf = ugeth->uccf;
1992
1993 cecr_subblock =
1994 ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);
1995 qe_issue_cmd(QE_RESTART_TX, cecr_subblock, (u8) QE_CR_PROTOCOL_ETHERNET,
1996 0);
1997 uccf->stopped_tx = 0;
1998
1999 return 0;
2000}
2001
2002static int ugeth_restart_rx(ucc_geth_private_t *ugeth)
2003{
2004 ucc_fast_private_t *uccf;
2005 u32 cecr_subblock;
2006
2007 uccf = ugeth->uccf;
2008
2009 cecr_subblock =
2010 ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);
2011 qe_issue_cmd(QE_RESTART_RX, cecr_subblock, (u8) QE_CR_PROTOCOL_ETHERNET,
2012 0);
2013 uccf->stopped_rx = 0;
2014
2015 return 0;
2016}
2017
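/* Enable the requested communication directions, restarting Tx and/or Rx
 * if they were previously stopped gracefully. */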
2018static int ugeth_enable(ucc_geth_private_t *ugeth, comm_dir_e mode)
2019{
2020 ucc_fast_private_t *uccf;
2021 int enabled_tx, enabled_rx;
2022
2023 uccf = ugeth->uccf;
2024
2025 /* check if the UCC number is in range. */
2026 if (ugeth->ug_info->uf_info.ucc_num >= UCC_MAX_NUM) {
2027 ugeth_err("%s: ucc_num out of range.", __FUNCTION__);
2028 return -EINVAL;
2029 }
2030
2031 enabled_tx = uccf->enabled_tx;
2032 enabled_rx = uccf->enabled_rx;
2033
2034 /* Get Tx and Rx going again, in case this channel was actively
2035 disabled. */
2036 if ((mode & COMM_DIR_TX) && (!enabled_tx) && uccf->stopped_tx)
2037 ugeth_restart_tx(ugeth);
2038 if ((mode & COMM_DIR_RX) && (!enabled_rx) && uccf->stopped_rx)
2039 ugeth_restart_rx(ugeth);
2040
2041 ucc_fast_enable(uccf, mode); /* OK to do even if not disabled */
2042
2043 return 0;
2044
2045}
2046
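/* Gracefully stop the requested communication directions and disable them
 * in the fast UCC. */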
2047static int ugeth_disable(ucc_geth_private_t * ugeth, comm_dir_e mode)
2048{
2049 ucc_fast_private_t *uccf;
2050
2051 uccf = ugeth->uccf;
2052
2053 /* check if the UCC number is in range. */
2054 if (ugeth->ug_info->uf_info.ucc_num >= UCC_MAX_NUM) {
2055 ugeth_err("%s: ucc_num out of range.", __FUNCTION__);
2056 return -EINVAL;
2057 }
2058
2059 /* Stop any transmissions */
2060 if ((mode & COMM_DIR_TX) && uccf->enabled_tx && !uccf->stopped_tx)
2061 ugeth_graceful_stop_tx(ugeth);
2062
2063 /* Stop any receptions */
2064 if ((mode & COMM_DIR_RX) && uccf->enabled_rx && !uccf->stopped_rx)
2065 ugeth_graceful_stop_rx(ugeth);
2066
2067 ucc_fast_disable(ugeth->uccf, mode); /* OK to do even if not enabled */
2068
2069 return 0;
2070}
2071
2072static void ugeth_dump_regs(ucc_geth_private_t *ugeth)
2073{
2074#ifdef DEBUG
2075 ucc_fast_dump_regs(ugeth->uccf);
2076 dump_regs(ugeth);
2077 dump_bds(ugeth);
2078#endif
2079}
2080
2081#ifdef CONFIG_UGETH_FILTERING
2082static int ugeth_ext_filtering_serialize_tad(ucc_geth_tad_params_t *
2083 p_UccGethTadParams,
2084 qe_fltr_tad_t *qe_fltr_tad)
2085{
2086 u16 temp;
2087
2088 /* Zero serialized TAD */
2089 memset(qe_fltr_tad, 0, QE_FLTR_TAD_SIZE);
2090
2091 qe_fltr_tad->serialized[0] |= UCC_GETH_TAD_V; /* Must have this */
2092 if (p_UccGethTadParams->rx_non_dynamic_extended_features_mode ||
2093 (p_UccGethTadParams->vtag_op != UCC_GETH_VLAN_OPERATION_TAGGED_NOP)
2094 || (p_UccGethTadParams->vnontag_op !=
2095 UCC_GETH_VLAN_OPERATION_NON_TAGGED_NOP)
2096 )
2097 qe_fltr_tad->serialized[0] |= UCC_GETH_TAD_EF;
2098 if (p_UccGethTadParams->reject_frame)
2099 qe_fltr_tad->serialized[0] |= UCC_GETH_TAD_REJ;
2100 temp =
2101 (u16) (((u16) p_UccGethTadParams->
2102 vtag_op) << UCC_GETH_TAD_VTAG_OP_SHIFT);
2103 qe_fltr_tad->serialized[0] |= (u8) (temp >> 8); /* upper bits */
2104
2105 qe_fltr_tad->serialized[1] |= (u8) (temp & 0x00ff); /* lower bits */
2106 if (p_UccGethTadParams->vnontag_op ==
2107 UCC_GETH_VLAN_OPERATION_NON_TAGGED_Q_TAG_INSERT)
2108 qe_fltr_tad->serialized[1] |= UCC_GETH_TAD_V_NON_VTAG_OP;
2109 qe_fltr_tad->serialized[1] |=
2110 p_UccGethTadParams->rqos << UCC_GETH_TAD_RQOS_SHIFT;
2111
2112 qe_fltr_tad->serialized[2] |=
2113 p_UccGethTadParams->vpri << UCC_GETH_TAD_V_PRIORITY_SHIFT;
2114 /* upper bits */
2115 qe_fltr_tad->serialized[2] |= (u8) (p_UccGethTadParams->vid >> 8);
2116 /* lower bits */
2117 qe_fltr_tad->serialized[3] |= (u8) (p_UccGethTadParams->vid & 0x00ff);
2118
2119 return 0;
2120}
2121
2122static enet_addr_container_t
2123 *ugeth_82xx_filtering_get_match_addr_in_hash(ucc_geth_private_t *ugeth,
2124 enet_addr_t *p_enet_addr)
2125{
2126 enet_addr_container_t *enet_addr_cont;
2127 struct list_head *p_lh;
2128 u16 i, num;
2129 int32_t j;
2130 u8 *p_counter;
2131
2132 if ((*p_enet_addr)[0] & ENET_GROUP_ADDR) {
2133 p_lh = &ugeth->group_hash_q;
2134 p_counter = &(ugeth->numGroupAddrInHash);
2135 } else {
2136 p_lh = &ugeth->ind_hash_q;
2137 p_counter = &(ugeth->numIndAddrInHash);
2138 }
2139
2140 if (!p_lh)
2141 return NULL;
2142
2143 num = *p_counter;
2144
2145 for (i = 0; i < num; i++) {
2146 enet_addr_cont =
2147 (enet_addr_container_t *)
2148 ENET_ADDR_CONT_ENTRY(dequeue(p_lh));
2149 for (j = ENET_NUM_OCTETS_PER_ADDRESS - 1; j >= 0; j--) {
2150 if ((*p_enet_addr)[j] != (enet_addr_cont->address)[j])
2151 break;
2152 if (j == 0)
2153 return enet_addr_cont; /* Found */
2154 }
2155 enqueue(p_lh, &enet_addr_cont->node); /* Put it back */
2156 }
2157 return NULL;
2158}
2159
2160static int ugeth_82xx_filtering_add_addr_in_hash(ucc_geth_private_t *ugeth,
2161 enet_addr_t *p_enet_addr)
2162{
2163 ucc_geth_enet_address_recognition_location_e location;
2164 enet_addr_container_t *enet_addr_cont;
2165 struct list_head *p_lh;
2166 u8 i;
2167 u32 limit;
2168 u8 *p_counter;
2169
2170 if ((*p_enet_addr)[0] & ENET_GROUP_ADDR) {
2171 p_lh = &ugeth->group_hash_q;
2172 limit = ugeth->ug_info->maxGroupAddrInHash;
2173 location =
2174 UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_GROUP_HASH;
2175 p_counter = &(ugeth->numGroupAddrInHash);
2176 } else {
2177 p_lh = &ugeth->ind_hash_q;
2178 limit = ugeth->ug_info->maxIndAddrInHash;
2179 location =
2180 UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_INDIVIDUAL_HASH;
2181 p_counter = &(ugeth->numIndAddrInHash);
2182 }
2183
2184 if ((enet_addr_cont =
2185 ugeth_82xx_filtering_get_match_addr_in_hash(ugeth, p_enet_addr))) {
2186 list_add(p_lh, &enet_addr_cont->node); /* Put it back */
2187 return 0;
2188 }
2189 if ((!p_lh) || (!(*p_counter < limit)))
2190 return -EBUSY;
2191 if (!(enet_addr_cont = get_enet_addr_container()))
2192 return -ENOMEM;
2193 for (i = 0; i < ENET_NUM_OCTETS_PER_ADDRESS; i++)
2194 (enet_addr_cont->address)[i] = (*p_enet_addr)[i];
2195 enet_addr_cont->location = location;
2196 enqueue(p_lh, &enet_addr_cont->node); /* Put it back */
2197 ++(*p_counter);
2198
2199 hw_add_addr_in_hash(ugeth, &(enet_addr_cont->address));
2200
2201 return 0;
2202}
2203
2204static int ugeth_82xx_filtering_clear_addr_in_hash(ucc_geth_private_t *ugeth,
2205 enet_addr_t *p_enet_addr)
2206{
2207 ucc_geth_82xx_address_filtering_pram_t *p_82xx_addr_filt;
2208 enet_addr_container_t *enet_addr_cont;
2209 ucc_fast_private_t *uccf;
2210 comm_dir_e comm_dir;
2211 u16 i, num;
2212 struct list_head *p_lh;
2213 u32 *addr_h, *addr_l;
2214 u8 *p_counter;
2215
2216 uccf = ugeth->uccf;
2217
2218 p_82xx_addr_filt =
2219 (ucc_geth_82xx_address_filtering_pram_t *) ugeth->p_rx_glbl_pram->
2220 addressfiltering;
2221
2222	enet_addr_cont =
2223	    ugeth_82xx_filtering_get_match_addr_in_hash(ugeth, p_enet_addr);
2224	if (!enet_addr_cont)
2225		return -ENOENT;
2226
2227 /* It's been found and removed from the CQ. */
2228 /* Now destroy its container */
2229 put_enet_addr_container(enet_addr_cont);
2230
2231 if ((*p_enet_addr)[0] & ENET_GROUP_ADDR) {
2232 addr_h = &(p_82xx_addr_filt->gaddr_h);
2233 addr_l = &(p_82xx_addr_filt->gaddr_l);
2234 p_lh = &ugeth->group_hash_q;
2235 p_counter = &(ugeth->numGroupAddrInHash);
2236 } else {
2237 addr_h = &(p_82xx_addr_filt->iaddr_h);
2238 addr_l = &(p_82xx_addr_filt->iaddr_l);
2239 p_lh = &ugeth->ind_hash_q;
2240 p_counter = &(ugeth->numIndAddrInHash);
2241 }
2242
2243 comm_dir = 0;
2244 if (uccf->enabled_tx)
2245 comm_dir |= COMM_DIR_TX;
2246 if (uccf->enabled_rx)
2247 comm_dir |= COMM_DIR_RX;
2248 if (comm_dir)
2249 ugeth_disable(ugeth, comm_dir);
2250
2251 /* Clear the hash table. */
2252 out_be32(addr_h, 0x00000000);
2253 out_be32(addr_l, 0x00000000);
2254
2255 /* Add all remaining CQ elements back into hash */
2256 num = --(*p_counter);
2257 for (i = 0; i < num; i++) {
2258 enet_addr_cont =
2259 (enet_addr_container_t *)
2260 ENET_ADDR_CONT_ENTRY(dequeue(p_lh));
2261 hw_add_addr_in_hash(ugeth, &(enet_addr_cont->address));
2262 enqueue(p_lh, &enet_addr_cont->node); /* Put it back */
2263 }
2264
2265 if (comm_dir)
2266 ugeth_enable(ugeth, comm_dir);
2267
2268 return 0;
2269}
2270#endif /* CONFIG_UGETH_FILTERING */
2271
2272static int ugeth_82xx_filtering_clear_all_addr_in_hash(ucc_geth_private_t *
2273 ugeth,
2274 enet_addr_type_e
2275 enet_addr_type)
2276{
2277 ucc_geth_82xx_address_filtering_pram_t *p_82xx_addr_filt;
2278 ucc_fast_private_t *uccf;
2279 comm_dir_e comm_dir;
2280 struct list_head *p_lh;
2281 u16 i, num;
2282 u32 *addr_h, *addr_l;
2283 u8 *p_counter;
2284
2285 uccf = ugeth->uccf;
2286
2287 p_82xx_addr_filt =
2288 (ucc_geth_82xx_address_filtering_pram_t *) ugeth->p_rx_glbl_pram->
2289 addressfiltering;
2290
2291 if (enet_addr_type == ENET_ADDR_TYPE_GROUP) {
2292 addr_h = &(p_82xx_addr_filt->gaddr_h);
2293 addr_l = &(p_82xx_addr_filt->gaddr_l);
2294 p_lh = &ugeth->group_hash_q;
2295 p_counter = &(ugeth->numGroupAddrInHash);
2296 } else if (enet_addr_type == ENET_ADDR_TYPE_INDIVIDUAL) {
2297 addr_h = &(p_82xx_addr_filt->iaddr_h);
2298 addr_l = &(p_82xx_addr_filt->iaddr_l);
2299 p_lh = &ugeth->ind_hash_q;
2300 p_counter = &(ugeth->numIndAddrInHash);
2301 } else
2302 return -EINVAL;
2303
2304 comm_dir = 0;
2305 if (uccf->enabled_tx)
2306 comm_dir |= COMM_DIR_TX;
2307 if (uccf->enabled_rx)
2308 comm_dir |= COMM_DIR_RX;
2309 if (comm_dir)
2310 ugeth_disable(ugeth, comm_dir);
2311
2312 /* Clear the hash table. */
2313 out_be32(addr_h, 0x00000000);
2314 out_be32(addr_l, 0x00000000);
2315
2316 if (!p_lh)
2317 return 0;
2318
2319 num = *p_counter;
2320
2321 /* Delete all remaining CQ elements */
2322 for (i = 0; i < num; i++)
2323 put_enet_addr_container(ENET_ADDR_CONT_ENTRY(dequeue(p_lh)));
2324
2325 *p_counter = 0;
2326
2327 if (comm_dir)
2328 ugeth_enable(ugeth, comm_dir);
2329
2330 return 0;
2331}
2332
2333#ifdef CONFIG_UGETH_FILTERING
2334static int ugeth_82xx_filtering_add_addr_in_paddr(ucc_geth_private_t *ugeth,
2335 enet_addr_t *p_enet_addr,
2336 u8 paddr_num)
2337{
2338 int i;
2339
2340 if ((*p_enet_addr)[0] & ENET_GROUP_ADDR)
2341 ugeth_warn
2342 ("%s: multicast address added to paddr will have no "
2343 "effect - is this what you wanted?",
2344 __FUNCTION__);
2345
2346 ugeth->indAddrRegUsed[paddr_num] = 1; /* mark this paddr as used */
2347 /* store address in our database */
2348 for (i = 0; i < ENET_NUM_OCTETS_PER_ADDRESS; i++)
2349 ugeth->paddr[paddr_num][i] = (*p_enet_addr)[i];
2350 /* put in hardware */
2351 return hw_add_addr_in_paddr(ugeth, p_enet_addr, paddr_num);
2352}
2353#endif /* CONFIG_UGETH_FILTERING */
2354
2355static int ugeth_82xx_filtering_clear_addr_in_paddr(ucc_geth_private_t *ugeth,
2356 u8 paddr_num)
2357{
2358 ugeth->indAddrRegUsed[paddr_num] = 0; /* mark this paddr as not used */
2359 return hw_clear_addr_in_paddr(ugeth, paddr_num);/* clear in hardware */
2360}
2361
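/* Free every resource allocated for the controller: the fast UCC, all MURAM
 * parameter RAM areas, BD rings, queued socket buffers and address containers. */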
2362static void ucc_geth_memclean(ucc_geth_private_t *ugeth)
2363{
2364 u16 i, j;
2365 u8 *bd;
2366
2367 if (!ugeth)
2368 return;
2369
2370 if (ugeth->uccf)
2371 ucc_fast_free(ugeth->uccf);
2372
2373 if (ugeth->p_thread_data_tx) {
2374 qe_muram_free(ugeth->thread_dat_tx_offset);
2375 ugeth->p_thread_data_tx = NULL;
2376 }
2377 if (ugeth->p_thread_data_rx) {
2378 qe_muram_free(ugeth->thread_dat_rx_offset);
2379 ugeth->p_thread_data_rx = NULL;
2380 }
2381 if (ugeth->p_exf_glbl_param) {
2382 qe_muram_free(ugeth->exf_glbl_param_offset);
2383 ugeth->p_exf_glbl_param = NULL;
2384 }
2385 if (ugeth->p_rx_glbl_pram) {
2386 qe_muram_free(ugeth->rx_glbl_pram_offset);
2387 ugeth->p_rx_glbl_pram = NULL;
2388 }
2389 if (ugeth->p_tx_glbl_pram) {
2390 qe_muram_free(ugeth->tx_glbl_pram_offset);
2391 ugeth->p_tx_glbl_pram = NULL;
2392 }
2393 if (ugeth->p_send_q_mem_reg) {
2394 qe_muram_free(ugeth->send_q_mem_reg_offset);
2395 ugeth->p_send_q_mem_reg = NULL;
2396 }
2397 if (ugeth->p_scheduler) {
2398 qe_muram_free(ugeth->scheduler_offset);
2399 ugeth->p_scheduler = NULL;
2400 }
2401 if (ugeth->p_tx_fw_statistics_pram) {
2402 qe_muram_free(ugeth->tx_fw_statistics_pram_offset);
2403 ugeth->p_tx_fw_statistics_pram = NULL;
2404 }
2405 if (ugeth->p_rx_fw_statistics_pram) {
2406 qe_muram_free(ugeth->rx_fw_statistics_pram_offset);
2407 ugeth->p_rx_fw_statistics_pram = NULL;
2408 }
2409 if (ugeth->p_rx_irq_coalescing_tbl) {
2410 qe_muram_free(ugeth->rx_irq_coalescing_tbl_offset);
2411 ugeth->p_rx_irq_coalescing_tbl = NULL;
2412 }
2413 if (ugeth->p_rx_bd_qs_tbl) {
2414 qe_muram_free(ugeth->rx_bd_qs_tbl_offset);
2415 ugeth->p_rx_bd_qs_tbl = NULL;
2416 }
2417 if (ugeth->p_init_enet_param_shadow) {
2418 return_init_enet_entries(ugeth,
2419 &(ugeth->p_init_enet_param_shadow->
2420 rxthread[0]),
2421 ENET_INIT_PARAM_MAX_ENTRIES_RX,
2422 ugeth->ug_info->riscRx, 1);
2423 return_init_enet_entries(ugeth,
2424 &(ugeth->p_init_enet_param_shadow->
2425 txthread[0]),
2426 ENET_INIT_PARAM_MAX_ENTRIES_TX,
2427 ugeth->ug_info->riscTx, 0);
2428 kfree(ugeth->p_init_enet_param_shadow);
2429 ugeth->p_init_enet_param_shadow = NULL;
2430 }
2431 for (i = 0; i < ugeth->ug_info->numQueuesTx; i++) {
2432 bd = ugeth->p_tx_bd_ring[i];
2433 for (j = 0; j < ugeth->ug_info->bdRingLenTx[i]; j++) {
2434 if (ugeth->tx_skbuff[i][j]) {
2435 dma_unmap_single(NULL,
2436 BD_BUFFER_ARG(bd),
2437 (BD_STATUS_AND_LENGTH(bd) &
2438 BD_LENGTH_MASK),
2439 DMA_TO_DEVICE);
2440 dev_kfree_skb_any(ugeth->tx_skbuff[i][j]);
2441 ugeth->tx_skbuff[i][j] = NULL;
2442 }
2443 }
2444
2445 kfree(ugeth->tx_skbuff[i]);
2446
2447 if (ugeth->p_tx_bd_ring[i]) {
2448 if (ugeth->ug_info->uf_info.bd_mem_part ==
2449 MEM_PART_SYSTEM)
2450 kfree((void *)ugeth->tx_bd_ring_offset[i]);
2451 else if (ugeth->ug_info->uf_info.bd_mem_part ==
2452 MEM_PART_MURAM)
2453 qe_muram_free(ugeth->tx_bd_ring_offset[i]);
2454 ugeth->p_tx_bd_ring[i] = NULL;
2455 }
2456 }
2457 for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) {
2458 if (ugeth->p_rx_bd_ring[i]) {
2459 /* Return existing data buffers in ring */
2460 bd = ugeth->p_rx_bd_ring[i];
2461 for (j = 0; j < ugeth->ug_info->bdRingLenRx[i]; j++) {
2462 if (ugeth->rx_skbuff[i][j]) {
2463 dma_unmap_single(NULL, BD_BUFFER(bd),
2464 ugeth->ug_info->
2465 uf_info.
2466 max_rx_buf_length +
2467 UCC_GETH_RX_DATA_BUF_ALIGNMENT,
2468 DMA_FROM_DEVICE);
2469
2470 dev_kfree_skb_any(ugeth->
2471 rx_skbuff[i][j]);
2472 ugeth->rx_skbuff[i][j] = NULL;
2473 }
2474 bd += UCC_GETH_SIZE_OF_BD;
2475 }
2476
2477 kfree(ugeth->rx_skbuff[i]);
2478
2479 if (ugeth->ug_info->uf_info.bd_mem_part ==
2480 MEM_PART_SYSTEM)
2481 kfree((void *)ugeth->rx_bd_ring_offset[i]);
2482 else if (ugeth->ug_info->uf_info.bd_mem_part ==
2483 MEM_PART_MURAM)
2484 qe_muram_free(ugeth->rx_bd_ring_offset[i]);
2485 ugeth->p_rx_bd_ring[i] = NULL;
2486 }
2487 }
2488 while (!list_empty(&ugeth->group_hash_q))
2489 put_enet_addr_container(ENET_ADDR_CONT_ENTRY
2490 (dequeue(&ugeth->group_hash_q)));
2491 while (!list_empty(&ugeth->ind_hash_q))
2492 put_enet_addr_container(ENET_ADDR_CONT_ENTRY
2493 (dequeue(&ugeth->ind_hash_q)));
2494
2495}
2496
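/* Update receive address filtering from dev->flags and the multicast list:
 * promiscuous mode toggles UPSMR_PRO, IFF_ALLMULTI opens the group hash filter,
 * otherwise each group address from the list is hashed into the filter. */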
2497static void ucc_geth_set_multi(struct net_device *dev)
2498{
2499 ucc_geth_private_t *ugeth;
2500 struct dev_mc_list *dmi;
2501 ucc_fast_t *uf_regs;
2502 ucc_geth_82xx_address_filtering_pram_t *p_82xx_addr_filt;
2503 enet_addr_t tempaddr;
2504 u8 *mcptr, *tdptr;
2505 int i, j;
2506
2507 ugeth = netdev_priv(dev);
2508
2509 uf_regs = ugeth->uccf->uf_regs;
2510
2511 if (dev->flags & IFF_PROMISC) {
2512
2513 /* Log any net taps. */
2514		printk(KERN_INFO "%s: Promiscuous mode enabled.\n", dev->name);
2515 uf_regs->upsmr |= UPSMR_PRO;
2516
2517 } else {
2518
2519 uf_regs->upsmr &= ~UPSMR_PRO;
2520
2521 p_82xx_addr_filt =
2522 (ucc_geth_82xx_address_filtering_pram_t *) ugeth->
2523 p_rx_glbl_pram->addressfiltering;
2524
2525 if (dev->flags & IFF_ALLMULTI) {
2526 /* Catch all multicast addresses, so set the
2527 * filter to all 1's.
2528 */
2529 out_be32(&p_82xx_addr_filt->gaddr_h, 0xffffffff);
2530 out_be32(&p_82xx_addr_filt->gaddr_l, 0xffffffff);
2531 } else {
2532 /* Clear filter and add the addresses in the list.
2533 */
2534 out_be32(&p_82xx_addr_filt->gaddr_h, 0x0);
2535 out_be32(&p_82xx_addr_filt->gaddr_l, 0x0);
2536
2537 dmi = dev->mc_list;
2538
2539 for (i = 0; i < dev->mc_count; i++, dmi = dmi->next) {
2540
2541 /* Only support group multicast for now.
2542 */
2543 if (!(dmi->dmi_addr[0] & 1))
2544 continue;
2545
2546 /* The address in dmi_addr is LSB first,
2547 * and taddr is MSB first. We have to
2548 * copy bytes MSB first from dmi_addr.
2549 */
2550 mcptr = (u8 *) dmi->dmi_addr + 5;
2551 tdptr = (u8 *) & tempaddr;
2552 for (j = 0; j < 6; j++)
2553 *tdptr++ = *mcptr--;
2554
2555 /* Ask CPM to run CRC and set bit in
2556 * filter mask.
2557 */
2558 hw_add_addr_in_hash(ugeth, &tempaddr);
2559
2560 }
2561 }
2562 }
2563}
2564
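/* Stop the controller: disable Rx/Tx, report link down, mask and clear
 * interrupts, disable the MAC, release IRQs and timers, and free resources. */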
2565static void ucc_geth_stop(ucc_geth_private_t *ugeth)
2566{
2567 ucc_geth_t *ug_regs = ugeth->ug_regs;
2568 u32 tempval;
2569
2570 ugeth_vdbg("%s: IN", __FUNCTION__);
2571
2572 /* Disable the controller */
2573 ugeth_disable(ugeth, COMM_DIR_RX_AND_TX);
2574
2575 /* Tell the kernel the link is down */
2576 ugeth->mii_info->link = 0;
2577 adjust_link(ugeth->dev);
2578
2579 /* Mask all interrupts */
2580	out_be32(ugeth->uccf->p_uccm, 0x00000000);
2581
2582 /* Clear all interrupts */
2583 out_be32(ugeth->uccf->p_ucce, 0xffffffff);
2584
2585 /* Disable Rx and Tx */
2586 tempval = in_be32(&ug_regs->maccfg1);
2587 tempval &= ~(MACCFG1_ENABLE_RX | MACCFG1_ENABLE_TX);
2588 out_be32(&ug_regs->maccfg1, tempval);
2589
2590 if (ugeth->ug_info->board_flags & FSL_UGETH_BRD_HAS_PHY_INTR) {
2591 /* Clear any pending interrupts */
2592 mii_clear_phy_interrupt(ugeth->mii_info);
2593
2594 /* Disable PHY Interrupts */
2595 mii_configure_phy_interrupt(ugeth->mii_info,
2596 MII_INTERRUPT_DISABLED);
2597 }
2598
2599 free_irq(ugeth->ug_info->uf_info.irq, ugeth->dev);
2600
2601 if (ugeth->ug_info->board_flags & FSL_UGETH_BRD_HAS_PHY_INTR) {
2602 free_irq(ugeth->ug_info->phy_interrupt, ugeth->dev);
2603 } else {
2604 del_timer_sync(&ugeth->phy_info_timer);
2605 }
2606
2607 ucc_geth_memclean(ugeth);
2608}
2609
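/* Allocate and initialize all resources for the controller: validate ug_info,
 * initialize the fast UCC, program the MAC registers, and set up the BD rings
 * and parameter RAM structures. */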
2610static int ucc_geth_startup(ucc_geth_private_t *ugeth)
2611{
2612 ucc_geth_82xx_address_filtering_pram_t *p_82xx_addr_filt;
2613 ucc_geth_init_pram_t *p_init_enet_pram;
2614 ucc_fast_private_t *uccf;
2615 ucc_geth_info_t *ug_info;
2616 ucc_fast_info_t *uf_info;
2617 ucc_fast_t *uf_regs;
2618 ucc_geth_t *ug_regs;
2619 int ret_val = -EINVAL;
2620 u32 remoder = UCC_GETH_REMODER_INIT;
2621 u32 init_enet_pram_offset, cecr_subblock, command, maccfg1;
2622 u32 ifstat, i, j, size, l2qt, l3qt, length;
2623 u16 temoder = UCC_GETH_TEMODER_INIT;
2624 u16 test;
2625 u8 function_code = 0;
2626 u8 *bd, *endOfRing;
2627 u8 numThreadsRxNumerical, numThreadsTxNumerical;
2628
2629 ugeth_vdbg("%s: IN", __FUNCTION__);
2630
2631 ug_info = ugeth->ug_info;
2632 uf_info = &ug_info->uf_info;
2633
2634 if (!((uf_info->bd_mem_part == MEM_PART_SYSTEM) ||
2635 (uf_info->bd_mem_part == MEM_PART_MURAM))) {
2636 ugeth_err("%s: Bad memory partition value.", __FUNCTION__);
2637 return -EINVAL;
2638 }
2639
2640 /* Rx BD lengths */
2641 for (i = 0; i < ug_info->numQueuesRx; i++) {
2642 if ((ug_info->bdRingLenRx[i] < UCC_GETH_RX_BD_RING_SIZE_MIN) ||
2643 (ug_info->bdRingLenRx[i] %
2644 UCC_GETH_RX_BD_RING_SIZE_ALIGNMENT)) {
2645 ugeth_err
2646 ("%s: Rx BD ring length must be multiple of 4,"
2647 " no smaller than 8.", __FUNCTION__);
2648 return -EINVAL;
2649 }
2650 }
2651
2652 /* Tx BD lengths */
2653 for (i = 0; i < ug_info->numQueuesTx; i++) {
2654 if (ug_info->bdRingLenTx[i] < UCC_GETH_TX_BD_RING_SIZE_MIN) {
2655 ugeth_err
2656 ("%s: Tx BD ring length must be no smaller than 2.",
2657 __FUNCTION__);
2658 return -EINVAL;
2659 }
2660 }
2661
2662 /* mrblr */
2663 if ((uf_info->max_rx_buf_length == 0) ||
2664 (uf_info->max_rx_buf_length % UCC_GETH_MRBLR_ALIGNMENT)) {
2665 ugeth_err
2666 ("%s: max_rx_buf_length must be non-zero multiple of 128.",
2667 __FUNCTION__);
2668 return -EINVAL;
2669 }
2670
2671 /* num Tx queues */
2672 if (ug_info->numQueuesTx > NUM_TX_QUEUES) {
2673 ugeth_err("%s: number of tx queues too large.", __FUNCTION__);
2674 return -EINVAL;
2675 }
2676
2677 /* num Rx queues */
2678 if (ug_info->numQueuesRx > NUM_RX_QUEUES) {
2679 ugeth_err("%s: number of rx queues too large.", __FUNCTION__);
2680 return -EINVAL;
2681 }
2682
2683 /* l2qt */
2684 for (i = 0; i < UCC_GETH_VLAN_PRIORITY_MAX; i++) {
2685 if (ug_info->l2qt[i] >= ug_info->numQueuesRx) {
2686 ugeth_err
2687 ("%s: VLAN priority table entry must not be"
2688 " larger than number of Rx queues.",
2689 __FUNCTION__);
2690 return -EINVAL;
2691 }
2692 }
2693
2694 /* l3qt */
2695 for (i = 0; i < UCC_GETH_IP_PRIORITY_MAX; i++) {
2696 if (ug_info->l3qt[i] >= ug_info->numQueuesRx) {
2697 ugeth_err
2698 ("%s: IP priority table entry must not be"
2699 " larger than number of Rx queues.",
2700 __FUNCTION__);
2701 return -EINVAL;
2702 }
2703 }
2704
2705 if (ug_info->cam && !ug_info->ecamptr) {
2706 ugeth_err("%s: If cam mode is chosen, must supply cam ptr.",
2707 __FUNCTION__);
2708 return -EINVAL;
2709 }
2710
2711 if ((ug_info->numStationAddresses !=
2712 UCC_GETH_NUM_OF_STATION_ADDRESSES_1)
2713 && ug_info->rxExtendedFiltering) {
2714 ugeth_err("%s: Number of station addresses greater than 1 "
2715 "not allowed in extended parsing mode.",
2716 __FUNCTION__);
2717 return -EINVAL;
2718 }
2719
2720 /* Generate uccm_mask for receive */
2721 uf_info->uccm_mask = ug_info->eventRegMask & UCCE_OTHER;/* Errors */
2722 for (i = 0; i < ug_info->numQueuesRx; i++)
2723 uf_info->uccm_mask |= (UCCE_RXBF_SINGLE_MASK << i);
2724
2725 for (i = 0; i < ug_info->numQueuesTx; i++)
2726 uf_info->uccm_mask |= (UCCE_TXBF_SINGLE_MASK << i);
2727 /* Initialize the general fast UCC block. */
2728 if (ucc_fast_init(uf_info, &uccf)) {
2729 ugeth_err("%s: Failed to init uccf.", __FUNCTION__);
2730 ucc_geth_memclean(ugeth);
2731 return -ENOMEM;
2732 }
2733 ugeth->uccf = uccf;
2734
2735 switch (ug_info->numThreadsRx) {
2736 case UCC_GETH_NUM_OF_THREADS_1:
2737 numThreadsRxNumerical = 1;
2738 break;
2739 case UCC_GETH_NUM_OF_THREADS_2:
2740 numThreadsRxNumerical = 2;
2741 break;
2742 case UCC_GETH_NUM_OF_THREADS_4:
2743 numThreadsRxNumerical = 4;
2744 break;
2745 case UCC_GETH_NUM_OF_THREADS_6:
2746 numThreadsRxNumerical = 6;
2747 break;
2748 case UCC_GETH_NUM_OF_THREADS_8:
2749 numThreadsRxNumerical = 8;
2750 break;
2751 default:
2752 ugeth_err("%s: Bad number of Rx threads value.", __FUNCTION__);
2753 ucc_geth_memclean(ugeth);
2754 return -EINVAL;
2755 break;
2756 }
2757
2758 switch (ug_info->numThreadsTx) {
2759 case UCC_GETH_NUM_OF_THREADS_1:
2760 numThreadsTxNumerical = 1;
2761 break;
2762 case UCC_GETH_NUM_OF_THREADS_2:
2763 numThreadsTxNumerical = 2;
2764 break;
2765 case UCC_GETH_NUM_OF_THREADS_4:
2766 numThreadsTxNumerical = 4;
2767 break;
2768 case UCC_GETH_NUM_OF_THREADS_6:
2769 numThreadsTxNumerical = 6;
2770 break;
2771 case UCC_GETH_NUM_OF_THREADS_8:
2772 numThreadsTxNumerical = 8;
2773 break;
2774 default:
2775 ugeth_err("%s: Bad number of Tx threads value.", __FUNCTION__);
2776 ucc_geth_memclean(ugeth);
2777 return -EINVAL;
2778 break;
2779 }
2780
2781 /* Calculate rx_extended_features */
2782 ugeth->rx_non_dynamic_extended_features = ug_info->ipCheckSumCheck ||
2783 ug_info->ipAddressAlignment ||
2784 (ug_info->numStationAddresses !=
2785 UCC_GETH_NUM_OF_STATION_ADDRESSES_1);
2786
2787 ugeth->rx_extended_features = ugeth->rx_non_dynamic_extended_features ||
2788 (ug_info->vlanOperationTagged != UCC_GETH_VLAN_OPERATION_TAGGED_NOP)
2789 || (ug_info->vlanOperationNonTagged !=
2790 UCC_GETH_VLAN_OPERATION_NON_TAGGED_NOP);
2791
2792 uf_regs = uccf->uf_regs;
2793 ug_regs = (ucc_geth_t *) (uccf->uf_regs);
2794 ugeth->ug_regs = ug_regs;
2795
2796 init_default_reg_vals(&uf_regs->upsmr,
2797 &ug_regs->maccfg1, &ug_regs->maccfg2);
2798
2799 /* Set UPSMR */
2800 /* For more details see the hardware spec. */
2801 init_rx_parameters(ug_info->bro,
2802 ug_info->rsh, ug_info->pro, &uf_regs->upsmr);
2803
2804 /* We're going to ignore other registers for now, */
2805 /* except as needed to get up and running */
2806
2807 /* Set MACCFG1 */
2808 /* For more details see the hardware spec. */
2809 init_flow_control_params(ug_info->aufc,
2810 ug_info->receiveFlowControl,
2811 1,
2812 ug_info->pausePeriod,
2813 ug_info->extensionField,
2814 &uf_regs->upsmr,
2815 &ug_regs->uempr, &ug_regs->maccfg1);
2816
2817 maccfg1 = in_be32(&ug_regs->maccfg1);
2818 maccfg1 |= MACCFG1_ENABLE_RX;
2819 maccfg1 |= MACCFG1_ENABLE_TX;
2820 out_be32(&ug_regs->maccfg1, maccfg1);
2821
2822 /* Set IPGIFG */
2823 /* For more details see the hardware spec. */
2824 ret_val = init_inter_frame_gap_params(ug_info->nonBackToBackIfgPart1,
2825 ug_info->nonBackToBackIfgPart2,
2826 ug_info->
2827 miminumInterFrameGapEnforcement,
2828 ug_info->backToBackInterFrameGap,
2829 &ug_regs->ipgifg);
2830 if (ret_val != 0) {
2831 ugeth_err("%s: IPGIFG initialization parameter too large.",
2832 __FUNCTION__);
2833 ucc_geth_memclean(ugeth);
2834 return ret_val;
2835 }
2836
2837 /* Set HAFDUP */
2838 /* For more details see the hardware spec. */
2839 ret_val = init_half_duplex_params(ug_info->altBeb,
2840 ug_info->backPressureNoBackoff,
2841 ug_info->noBackoff,
2842 ug_info->excessDefer,
2843 ug_info->altBebTruncation,
2844 ug_info->maxRetransmission,
2845 ug_info->collisionWindow,
2846 &ug_regs->hafdup);
2847 if (ret_val != 0) {
2848 ugeth_err("%s: Half Duplex initialization parameter too large.",
2849 __FUNCTION__);
2850 ucc_geth_memclean(ugeth);
2851 return ret_val;
2852 }
2853
2854 /* Set IFSTAT */
2855 /* For more details see the hardware spec. */
2856 /* Read only - resets upon read */
2857 ifstat = in_be32(&ug_regs->ifstat);
2858
2859 /* Clear UEMPR */
2860 /* For more details see the hardware spec. */
2861 out_be32(&ug_regs->uempr, 0);
2862
2863 /* Set UESCR */
2864 /* For more details see the hardware spec. */
2865 init_hw_statistics_gathering_mode((ug_info->statisticsMode &
2866 UCC_GETH_STATISTICS_GATHERING_MODE_HARDWARE),
2867 0, &uf_regs->upsmr, &ug_regs->uescr);
2868
2869 /* Allocate Tx bds */
2870 for (j = 0; j < ug_info->numQueuesTx; j++) {
2871 /* Allocate in multiple of
2872 UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT,
2873 according to spec */
2874 length = ((ug_info->bdRingLenTx[j] * UCC_GETH_SIZE_OF_BD)
2875 / UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT)
2876 * UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT;
2877 if ((ug_info->bdRingLenTx[j] * UCC_GETH_SIZE_OF_BD) %
2878 UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT)
2879 length += UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT;
2880 if (uf_info->bd_mem_part == MEM_PART_SYSTEM) {
2881 u32 align = 4;
2882 if (UCC_GETH_TX_BD_RING_ALIGNMENT > 4)
2883 align = UCC_GETH_TX_BD_RING_ALIGNMENT;
2884 ugeth->tx_bd_ring_offset[j] =
2885 (u32) (kmalloc((u32) (length + align),
2886 GFP_KERNEL));
2887 if (ugeth->tx_bd_ring_offset[j] != 0)
2888 ugeth->p_tx_bd_ring[j] =
2889 (void*)((ugeth->tx_bd_ring_offset[j] +
2890 align) & ~(align - 1));
2891 } else if (uf_info->bd_mem_part == MEM_PART_MURAM) {
2892 ugeth->tx_bd_ring_offset[j] =
2893 qe_muram_alloc(length,
2894 UCC_GETH_TX_BD_RING_ALIGNMENT);
2895 if (!IS_MURAM_ERR(ugeth->tx_bd_ring_offset[j]))
2896 ugeth->p_tx_bd_ring[j] =
2897 (u8 *) qe_muram_addr(ugeth->
2898 tx_bd_ring_offset[j]);
2899 }
2900 if (!ugeth->p_tx_bd_ring[j]) {
2901 ugeth_err
2902 ("%s: Can not allocate memory for Tx bd rings.",
2903 __FUNCTION__);
2904 ucc_geth_memclean(ugeth);
2905 return -ENOMEM;
2906 }
2907 /* Zero unused end of bd ring, according to spec */
2908 memset(ugeth->p_tx_bd_ring[j] +
2909 ug_info->bdRingLenTx[j] * UCC_GETH_SIZE_OF_BD, 0,
2910 length - ug_info->bdRingLenTx[j] * UCC_GETH_SIZE_OF_BD);
2911 }
2912
2913 /* Allocate Rx bds */
2914 for (j = 0; j < ug_info->numQueuesRx; j++) {
2915 length = ug_info->bdRingLenRx[j] * UCC_GETH_SIZE_OF_BD;
2916 if (uf_info->bd_mem_part == MEM_PART_SYSTEM) {
2917 u32 align = 4;
2918 if (UCC_GETH_RX_BD_RING_ALIGNMENT > 4)
2919 align = UCC_GETH_RX_BD_RING_ALIGNMENT;
2920 ugeth->rx_bd_ring_offset[j] =
2921 (u32) (kmalloc((u32) (length + align), GFP_KERNEL));
2922 if (ugeth->rx_bd_ring_offset[j] != 0)
2923 ugeth->p_rx_bd_ring[j] =
2924 (void*)((ugeth->rx_bd_ring_offset[j] +
2925 align) & ~(align - 1));
2926 } else if (uf_info->bd_mem_part == MEM_PART_MURAM) {
2927 ugeth->rx_bd_ring_offset[j] =
2928 qe_muram_alloc(length,
2929 UCC_GETH_RX_BD_RING_ALIGNMENT);
2930 if (!IS_MURAM_ERR(ugeth->rx_bd_ring_offset[j]))
2931 ugeth->p_rx_bd_ring[j] =
2932 (u8 *) qe_muram_addr(ugeth->
2933 rx_bd_ring_offset[j]);
2934 }
2935 if (!ugeth->p_rx_bd_ring[j]) {
2936 ugeth_err
2937 ("%s: Can not allocate memory for Rx bd rings.",
2938 __FUNCTION__);
2939 ucc_geth_memclean(ugeth);
2940 return -ENOMEM;
2941 }
2942 }
2943
2944 /* Init Tx bds */
2945 for (j = 0; j < ug_info->numQueuesTx; j++) {
2946 /* Setup the skbuff rings */
2947 ugeth->tx_skbuff[j] =
2948 (struct sk_buff **)kmalloc(sizeof(struct sk_buff *) *
2949 ugeth->ug_info->bdRingLenTx[j],
2950 GFP_KERNEL);
2951
2952 if (ugeth->tx_skbuff[j] == NULL) {
2953 ugeth_err("%s: Could not allocate tx_skbuff",
2954 __FUNCTION__);
2955 ucc_geth_memclean(ugeth);
2956 return -ENOMEM;
2957 }
2958
2959 for (i = 0; i < ugeth->ug_info->bdRingLenTx[j]; i++)
2960 ugeth->tx_skbuff[j][i] = NULL;
2961
2962 ugeth->skb_curtx[j] = ugeth->skb_dirtytx[j] = 0;
2963 bd = ugeth->confBd[j] = ugeth->txBd[j] = ugeth->p_tx_bd_ring[j];
2964 for (i = 0; i < ug_info->bdRingLenTx[j]; i++) {
2965 BD_BUFFER_CLEAR(bd);
2966 BD_STATUS_AND_LENGTH_SET(bd, 0);
2967 bd += UCC_GETH_SIZE_OF_BD;
2968 }
2969 bd -= UCC_GETH_SIZE_OF_BD;
2970 BD_STATUS_AND_LENGTH_SET(bd, T_W);/* for last BD set Wrap bit */
2971 }
2972
2973 /* Init Rx bds */
2974 for (j = 0; j < ug_info->numQueuesRx; j++) {
2975 /* Setup the skbuff rings */
2976 ugeth->rx_skbuff[j] =
2977 (struct sk_buff **)kmalloc(sizeof(struct sk_buff *) *
2978 ugeth->ug_info->bdRingLenRx[j],
2979 GFP_KERNEL);
2980
2981 if (ugeth->rx_skbuff[j] == NULL) {
2982 ugeth_err("%s: Could not allocate rx_skbuff",
2983 __FUNCTION__);
2984 ucc_geth_memclean(ugeth);
2985 return -ENOMEM;
2986 }
2987
2988 for (i = 0; i < ugeth->ug_info->bdRingLenRx[j]; i++)
2989 ugeth->rx_skbuff[j][i] = NULL;
2990
2991 ugeth->skb_currx[j] = 0;
2992 bd = ugeth->rxBd[j] = ugeth->p_rx_bd_ring[j];
2993 for (i = 0; i < ug_info->bdRingLenRx[j]; i++) {
2994 BD_STATUS_AND_LENGTH_SET(bd, R_I);
2995 BD_BUFFER_CLEAR(bd);
2996 bd += UCC_GETH_SIZE_OF_BD;
2997 }
2998 bd -= UCC_GETH_SIZE_OF_BD;
2999 BD_STATUS_AND_LENGTH_SET(bd, R_W);/* for last BD set Wrap bit */
3000 }
3001
3002 /*
3003 * Global PRAM
3004 */
3005 /* Tx global PRAM */
3006 /* Allocate global tx parameter RAM page */
3007 ugeth->tx_glbl_pram_offset =
3008 qe_muram_alloc(sizeof(ucc_geth_tx_global_pram_t),
3009 UCC_GETH_TX_GLOBAL_PRAM_ALIGNMENT);
3010 if (IS_MURAM_ERR(ugeth->tx_glbl_pram_offset)) {
3011 ugeth_err
3012 ("%s: Can not allocate DPRAM memory for p_tx_glbl_pram.",
3013 __FUNCTION__);
3014 ucc_geth_memclean(ugeth);
3015 return -ENOMEM;
3016 }
3017 ugeth->p_tx_glbl_pram =
3018 (ucc_geth_tx_global_pram_t *) qe_muram_addr(ugeth->
3019 tx_glbl_pram_offset);
3020 /* Zero out p_tx_glbl_pram */
3021 memset(ugeth->p_tx_glbl_pram, 0, sizeof(ucc_geth_tx_global_pram_t));
3022
3023 /* Fill global PRAM */
3024
3025 /* TQPTR */
3026 /* Size varies with number of Tx threads */
3027 ugeth->thread_dat_tx_offset =
3028 qe_muram_alloc(numThreadsTxNumerical *
3029 sizeof(ucc_geth_thread_data_tx_t) +
3030 32 * (numThreadsTxNumerical == 1),
3031 UCC_GETH_THREAD_DATA_ALIGNMENT);
3032 if (IS_MURAM_ERR(ugeth->thread_dat_tx_offset)) {
3033 ugeth_err
3034 ("%s: Can not allocate DPRAM memory for p_thread_data_tx.",
3035 __FUNCTION__);
3036 ucc_geth_memclean(ugeth);
3037 return -ENOMEM;
3038 }
3039
3040 ugeth->p_thread_data_tx =
3041 (ucc_geth_thread_data_tx_t *) qe_muram_addr(ugeth->
3042 thread_dat_tx_offset);
3043 out_be32(&ugeth->p_tx_glbl_pram->tqptr, ugeth->thread_dat_tx_offset);
3044
3045 /* vtagtable */
3046 for (i = 0; i < UCC_GETH_TX_VTAG_TABLE_ENTRY_MAX; i++)
3047 out_be32(&ugeth->p_tx_glbl_pram->vtagtable[i],
3048 ug_info->vtagtable[i]);
3049
3050 /* iphoffset */
3051 for (i = 0; i < TX_IP_OFFSET_ENTRY_MAX; i++)
3052 ugeth->p_tx_glbl_pram->iphoffset[i] = ug_info->iphoffset[i];
3053
3054 /* SQPTR */
3055 /* Size varies with number of Tx queues */
3056 ugeth->send_q_mem_reg_offset =
3057 qe_muram_alloc(ug_info->numQueuesTx *
3058 sizeof(ucc_geth_send_queue_qd_t),
3059 UCC_GETH_SEND_QUEUE_QUEUE_DESCRIPTOR_ALIGNMENT);
3060 if (IS_MURAM_ERR(ugeth->send_q_mem_reg_offset)) {
3061 ugeth_err
3062 ("%s: Can not allocate DPRAM memory for p_send_q_mem_reg.",
3063 __FUNCTION__);
3064 ucc_geth_memclean(ugeth);
3065 return -ENOMEM;
3066 }
3067
3068 ugeth->p_send_q_mem_reg =
3069 (ucc_geth_send_queue_mem_region_t *) qe_muram_addr(ugeth->
3070 send_q_mem_reg_offset);
3071 out_be32(&ugeth->p_tx_glbl_pram->sqptr, ugeth->send_q_mem_reg_offset);
3072
3073 /* Setup the table */
3074 /* Assume BD rings are already established */
3075 for (i = 0; i < ug_info->numQueuesTx; i++) {
3076 endOfRing =
3077 ugeth->p_tx_bd_ring[i] + (ug_info->bdRingLenTx[i] -
3078 1) * UCC_GETH_SIZE_OF_BD;
3079 if (ugeth->ug_info->uf_info.bd_mem_part == MEM_PART_SYSTEM) {
3080 out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].bd_ring_base,
3081 (u32) virt_to_phys(ugeth->p_tx_bd_ring[i]));
3082 out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].
3083 last_bd_completed_address,
3084 (u32) virt_to_phys(endOfRing));
3085 } else if (ugeth->ug_info->uf_info.bd_mem_part ==
3086 MEM_PART_MURAM) {
3087 out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].bd_ring_base,
3088 (u32) immrbar_virt_to_phys(ugeth->
3089 p_tx_bd_ring[i]));
3090 out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].
3091 last_bd_completed_address,
3092 (u32) immrbar_virt_to_phys(endOfRing));
3093 }
3094 }
3095
3096 /* schedulerbasepointer */
3097
3098 if (ug_info->numQueuesTx > 1) {
3099 /* scheduler exists only if more than 1 tx queue */
3100 ugeth->scheduler_offset =
3101 qe_muram_alloc(sizeof(ucc_geth_scheduler_t),
3102 UCC_GETH_SCHEDULER_ALIGNMENT);
3103 if (IS_MURAM_ERR(ugeth->scheduler_offset)) {
3104 ugeth_err
3105 ("%s: Can not allocate DPRAM memory for p_scheduler.",
3106 __FUNCTION__);
3107 ucc_geth_memclean(ugeth);
3108 return -ENOMEM;
3109 }
3110
3111 ugeth->p_scheduler =
3112 (ucc_geth_scheduler_t *) qe_muram_addr(ugeth->
3113 scheduler_offset);
3114 out_be32(&ugeth->p_tx_glbl_pram->schedulerbasepointer,
3115 ugeth->scheduler_offset);
3116 /* Zero out p_scheduler */
3117 memset(ugeth->p_scheduler, 0, sizeof(ucc_geth_scheduler_t));
3118
3119 /* Set values in scheduler */
3120 out_be32(&ugeth->p_scheduler->mblinterval,
3121 ug_info->mblinterval);
3122 out_be16(&ugeth->p_scheduler->nortsrbytetime,
3123 ug_info->nortsrbytetime);
3124 ugeth->p_scheduler->fracsiz = ug_info->fracsiz;
3125 ugeth->p_scheduler->strictpriorityq = ug_info->strictpriorityq;
3126 ugeth->p_scheduler->txasap = ug_info->txasap;
3127 ugeth->p_scheduler->extrabw = ug_info->extrabw;
3128 for (i = 0; i < NUM_TX_QUEUES; i++)
3129 ugeth->p_scheduler->weightfactor[i] =
3130 ug_info->weightfactor[i];
3131
3132 /* Set pointers to cpucount registers in scheduler */
3133 ugeth->p_cpucount[0] = &(ugeth->p_scheduler->cpucount0);
3134 ugeth->p_cpucount[1] = &(ugeth->p_scheduler->cpucount1);
3135 ugeth->p_cpucount[2] = &(ugeth->p_scheduler->cpucount2);
3136 ugeth->p_cpucount[3] = &(ugeth->p_scheduler->cpucount3);
3137 ugeth->p_cpucount[4] = &(ugeth->p_scheduler->cpucount4);
3138 ugeth->p_cpucount[5] = &(ugeth->p_scheduler->cpucount5);
3139 ugeth->p_cpucount[6] = &(ugeth->p_scheduler->cpucount6);
3140 ugeth->p_cpucount[7] = &(ugeth->p_scheduler->cpucount7);
3141 }
3142
3143 /* schedulerbasepointer */
3144 /* TxRMON_PTR (statistics) */
3145 if (ug_info->
3146 statisticsMode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX) {
3147 ugeth->tx_fw_statistics_pram_offset =
3148 qe_muram_alloc(sizeof
3149 (ucc_geth_tx_firmware_statistics_pram_t),
3150 UCC_GETH_TX_STATISTICS_ALIGNMENT);
3151 if (IS_MURAM_ERR(ugeth->tx_fw_statistics_pram_offset)) {
3152 ugeth_err
3153 ("%s: Can not allocate DPRAM memory for"
3154 " p_tx_fw_statistics_pram.", __FUNCTION__);
3155 ucc_geth_memclean(ugeth);
3156 return -ENOMEM;
3157 }
3158 ugeth->p_tx_fw_statistics_pram =
3159 (ucc_geth_tx_firmware_statistics_pram_t *)
3160 qe_muram_addr(ugeth->tx_fw_statistics_pram_offset);
3161 /* Zero out p_tx_fw_statistics_pram */
3162 memset(ugeth->p_tx_fw_statistics_pram,
3163 0, sizeof(ucc_geth_tx_firmware_statistics_pram_t));
3164 }
3165
3166 /* temoder */
3167 /* Already has speed set */
3168
3169 if (ug_info->numQueuesTx > 1)
3170 temoder |= TEMODER_SCHEDULER_ENABLE;
3171 if (ug_info->ipCheckSumGenerate)
3172 temoder |= TEMODER_IP_CHECKSUM_GENERATE;
3173 temoder |= ((ug_info->numQueuesTx - 1) << TEMODER_NUM_OF_QUEUES_SHIFT);
3174 out_be16(&ugeth->p_tx_glbl_pram->temoder, temoder);
3175
3176 test = in_be16(&ugeth->p_tx_glbl_pram->temoder);
3177
3178 /* Function code register value to be used later */
3179 function_code = QE_BMR_BYTE_ORDER_BO_MOT | UCC_FAST_FUNCTION_CODE_GBL;
3180 /* Required for QE */
3181
3182 /* function code register */
3183 out_be32(&ugeth->p_tx_glbl_pram->tstate, ((u32) function_code) << 24);
3184
3185 /* Rx global PRAM */
3186 /* Allocate global rx parameter RAM page */
3187 ugeth->rx_glbl_pram_offset =
3188 qe_muram_alloc(sizeof(ucc_geth_rx_global_pram_t),
3189 UCC_GETH_RX_GLOBAL_PRAM_ALIGNMENT);
3190 if (IS_MURAM_ERR(ugeth->rx_glbl_pram_offset)) {
3191 ugeth_err
3192 ("%s: Can not allocate DPRAM memory for p_rx_glbl_pram.",
3193 __FUNCTION__);
3194 ucc_geth_memclean(ugeth);
3195 return -ENOMEM;
3196 }
3197 ugeth->p_rx_glbl_pram =
3198 (ucc_geth_rx_global_pram_t *) qe_muram_addr(ugeth->
3199 rx_glbl_pram_offset);
3200 /* Zero out p_rx_glbl_pram */
3201 memset(ugeth->p_rx_glbl_pram, 0, sizeof(ucc_geth_rx_global_pram_t));
3202
3203 /* Fill global PRAM */
3204
3205 /* RQPTR */
3206 /* Size varies with number of Rx threads */
3207 ugeth->thread_dat_rx_offset =
3208 qe_muram_alloc(numThreadsRxNumerical *
3209 sizeof(ucc_geth_thread_data_rx_t),
3210 UCC_GETH_THREAD_DATA_ALIGNMENT);
3211 if (IS_MURAM_ERR(ugeth->thread_dat_rx_offset)) {
3212 ugeth_err
3213 ("%s: Can not allocate DPRAM memory for p_thread_data_rx.",
3214 __FUNCTION__);
3215 ucc_geth_memclean(ugeth);
3216 return -ENOMEM;
3217 }
3218
3219 ugeth->p_thread_data_rx =
3220 (ucc_geth_thread_data_rx_t *) qe_muram_addr(ugeth->
3221 thread_dat_rx_offset);
3222 out_be32(&ugeth->p_rx_glbl_pram->rqptr, ugeth->thread_dat_rx_offset);
3223
3224 /* typeorlen */
3225 out_be16(&ugeth->p_rx_glbl_pram->typeorlen, ug_info->typeorlen);
3226
3227 /* rxrmonbaseptr (statistics) */
3228 if (ug_info->
3229 statisticsMode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX) {
3230 ugeth->rx_fw_statistics_pram_offset =
3231 qe_muram_alloc(sizeof
3232 (ucc_geth_rx_firmware_statistics_pram_t),
3233 UCC_GETH_RX_STATISTICS_ALIGNMENT);
3234 if (IS_MURAM_ERR(ugeth->rx_fw_statistics_pram_offset)) {
3235 ugeth_err
3236 ("%s: Can not allocate DPRAM memory for"
3237 " p_rx_fw_statistics_pram.", __FUNCTION__);
3238 ucc_geth_memclean(ugeth);
3239 return -ENOMEM;
3240 }
3241 ugeth->p_rx_fw_statistics_pram =
3242 (ucc_geth_rx_firmware_statistics_pram_t *)
3243 qe_muram_addr(ugeth->rx_fw_statistics_pram_offset);
3244 /* Zero out p_rx_fw_statistics_pram */
3245 memset(ugeth->p_rx_fw_statistics_pram, 0,
3246 sizeof(ucc_geth_rx_firmware_statistics_pram_t));
3247 }
3248
3249 /* intCoalescingPtr */
3250
3251 /* Size varies with number of Rx queues */
3252 ugeth->rx_irq_coalescing_tbl_offset =
3253 qe_muram_alloc(ug_info->numQueuesRx *
3254 sizeof(ucc_geth_rx_interrupt_coalescing_entry_t),
3255 UCC_GETH_RX_INTERRUPT_COALESCING_ALIGNMENT);
3256 if (IS_MURAM_ERR(ugeth->rx_irq_coalescing_tbl_offset)) {
3257 ugeth_err
3258 ("%s: Can not allocate DPRAM memory for"
3259 " p_rx_irq_coalescing_tbl.", __FUNCTION__);
3260 ucc_geth_memclean(ugeth);
3261 return -ENOMEM;
3262 }
3263
3264 ugeth->p_rx_irq_coalescing_tbl =
3265 (ucc_geth_rx_interrupt_coalescing_table_t *)
3266 qe_muram_addr(ugeth->rx_irq_coalescing_tbl_offset);
3267 out_be32(&ugeth->p_rx_glbl_pram->intcoalescingptr,
3268 ugeth->rx_irq_coalescing_tbl_offset);
3269
3270 /* Fill interrupt coalescing table */
3271 for (i = 0; i < ug_info->numQueuesRx; i++) {
3272 out_be32(&ugeth->p_rx_irq_coalescing_tbl->coalescingentry[i].
3273 interruptcoalescingmaxvalue,
3274 ug_info->interruptcoalescingmaxvalue[i]);
3275 out_be32(&ugeth->p_rx_irq_coalescing_tbl->coalescingentry[i].
3276 interruptcoalescingcounter,
3277 ug_info->interruptcoalescingmaxvalue[i]);
3278 }
3279
3280 /* MRBLR */
3281 init_max_rx_buff_len(uf_info->max_rx_buf_length,
3282 &ugeth->p_rx_glbl_pram->mrblr);
3283 /* MFLR */
3284 out_be16(&ugeth->p_rx_glbl_pram->mflr, ug_info->maxFrameLength);
3285 /* MINFLR */
3286 init_min_frame_len(ug_info->minFrameLength,
3287 &ugeth->p_rx_glbl_pram->minflr,
3288 &ugeth->p_rx_glbl_pram->mrblr);
3289 /* MAXD1 */
3290 out_be16(&ugeth->p_rx_glbl_pram->maxd1, ug_info->maxD1Length);
3291 /* MAXD2 */
3292 out_be16(&ugeth->p_rx_glbl_pram->maxd2, ug_info->maxD2Length);
3293
3294 /* l2qt */
3295 l2qt = 0;
3296 for (i = 0; i < UCC_GETH_VLAN_PRIORITY_MAX; i++)
3297 l2qt |= (ug_info->l2qt[i] << (28 - 4 * i));
3298 out_be32(&ugeth->p_rx_glbl_pram->l2qt, l2qt);
3299
3300 /* l3qt */
3301 for (j = 0; j < UCC_GETH_IP_PRIORITY_MAX; j += 8) {
3302 l3qt = 0;
3303 for (i = 0; i < 8; i++)
3304 l3qt |= (ug_info->l3qt[j + i] << (28 - 4 * i));
3305 out_be32(&ugeth->p_rx_glbl_pram->l3qt[j], l3qt);
3306 }
3307
3308 /* vlantype */
3309 out_be16(&ugeth->p_rx_glbl_pram->vlantype, ug_info->vlantype);
3310
3311 /* vlantci */
3312 out_be16(&ugeth->p_rx_glbl_pram->vlantci, ug_info->vlantci);
3313
3314 /* ecamptr */
3315 out_be32(&ugeth->p_rx_glbl_pram->ecamptr, ug_info->ecamptr);
3316
3317 /* RBDQPTR */
3318 /* Size varies with number of Rx queues */
3319 ugeth->rx_bd_qs_tbl_offset =
3320 qe_muram_alloc(ug_info->numQueuesRx *
3321 (sizeof(ucc_geth_rx_bd_queues_entry_t) +
3322 sizeof(ucc_geth_rx_prefetched_bds_t)),
3323 UCC_GETH_RX_BD_QUEUES_ALIGNMENT);
3324 if (IS_MURAM_ERR(ugeth->rx_bd_qs_tbl_offset)) {
3325 ugeth_err
3326 ("%s: Can not allocate DPRAM memory for p_rx_bd_qs_tbl.",
3327 __FUNCTION__);
3328 ucc_geth_memclean(ugeth);
3329 return -ENOMEM;
3330 }
3331
3332 ugeth->p_rx_bd_qs_tbl =
3333 (ucc_geth_rx_bd_queues_entry_t *) qe_muram_addr(ugeth->
3334 rx_bd_qs_tbl_offset);
3335 out_be32(&ugeth->p_rx_glbl_pram->rbdqptr, ugeth->rx_bd_qs_tbl_offset);
3336 /* Zero out p_rx_bd_qs_tbl */
3337 memset(ugeth->p_rx_bd_qs_tbl,
3338 0,
3339 ug_info->numQueuesRx * (sizeof(ucc_geth_rx_bd_queues_entry_t) +
3340 sizeof(ucc_geth_rx_prefetched_bds_t)));
3341
3342 /* Setup the table */
3343 /* Assume BD rings are already established */
3344 for (i = 0; i < ug_info->numQueuesRx; i++) {
3345 if (ugeth->ug_info->uf_info.bd_mem_part == MEM_PART_SYSTEM) {
3346 out_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr,
3347 (u32) virt_to_phys(ugeth->p_rx_bd_ring[i]));
3348 } else if (ugeth->ug_info->uf_info.bd_mem_part ==
3349 MEM_PART_MURAM) {
3350 out_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr,
3351 (u32) immrbar_virt_to_phys(ugeth->
3352 p_rx_bd_ring[i]));
3353 }
3354 /* rest of fields handled by QE */
3355 }
3356
3357 /* remoder */
3358 /* Already has speed set */
3359
3360 if (ugeth->rx_extended_features)
3361 remoder |= REMODER_RX_EXTENDED_FEATURES;
3362 if (ug_info->rxExtendedFiltering)
3363 remoder |= REMODER_RX_EXTENDED_FILTERING;
3364 if (ug_info->dynamicMaxFrameLength)
3365 remoder |= REMODER_DYNAMIC_MAX_FRAME_LENGTH;
3366 if (ug_info->dynamicMinFrameLength)
3367 remoder |= REMODER_DYNAMIC_MIN_FRAME_LENGTH;
3368 remoder |=
3369 ug_info->vlanOperationTagged << REMODER_VLAN_OPERATION_TAGGED_SHIFT;
3370 remoder |=
3371 ug_info->
3372 vlanOperationNonTagged << REMODER_VLAN_OPERATION_NON_TAGGED_SHIFT;
3373 remoder |= ug_info->rxQoSMode << REMODER_RX_QOS_MODE_SHIFT;
3374 remoder |= ((ug_info->numQueuesRx - 1) << REMODER_NUM_OF_QUEUES_SHIFT);
3375 if (ug_info->ipCheckSumCheck)
3376 remoder |= REMODER_IP_CHECKSUM_CHECK;
3377 if (ug_info->ipAddressAlignment)
3378 remoder |= REMODER_IP_ADDRESS_ALIGNMENT;
3379 out_be32(&ugeth->p_rx_glbl_pram->remoder, remoder);
3380
3381 /* Note that this function must be called */
3382 /* ONLY AFTER p_tx_fw_statistics_pram */
3383 * and p_UccGethRxFirmwareStatisticsPram are allocated ! */
3384 init_firmware_statistics_gathering_mode((ug_info->
3385 statisticsMode &
3386 UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX),
3387 (ug_info->statisticsMode &
3388 UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX),
3389 &ugeth->p_tx_glbl_pram->txrmonbaseptr,
3390 ugeth->tx_fw_statistics_pram_offset,
3391 &ugeth->p_rx_glbl_pram->rxrmonbaseptr,
3392 ugeth->rx_fw_statistics_pram_offset,
3393 &ugeth->p_tx_glbl_pram->temoder,
3394 &ugeth->p_rx_glbl_pram->remoder);
3395
3396 /* function code register */
3397 ugeth->p_rx_glbl_pram->rstate = function_code;
3398
3399 /* initialize extended filtering */
3400 if (ug_info->rxExtendedFiltering) {
3401 if (!ug_info->extendedFilteringChainPointer) {
3402 ugeth_err("%s: Null Extended Filtering Chain Pointer.",
3403 __FUNCTION__);
3404 ucc_geth_memclean(ugeth);
3405 return -EINVAL;
3406 }
3407
3408 /* Allocate memory for extended filtering Mode Global
3409 Parameters */
3410 ugeth->exf_glbl_param_offset =
3411 qe_muram_alloc(sizeof(ucc_geth_exf_global_pram_t),
3412 UCC_GETH_RX_EXTENDED_FILTERING_GLOBAL_PARAMETERS_ALIGNMENT);
3413 if (IS_MURAM_ERR(ugeth->exf_glbl_param_offset)) {
3414 ugeth_err
3415 ("%s: Can not allocate DPRAM memory for"
3416 " p_exf_glbl_param.", __FUNCTION__);
3417 ucc_geth_memclean(ugeth);
3418 return -ENOMEM;
3419 }
3420
3421 ugeth->p_exf_glbl_param =
3422 (ucc_geth_exf_global_pram_t *) qe_muram_addr(ugeth->
3423 exf_glbl_param_offset);
3424 out_be32(&ugeth->p_rx_glbl_pram->exfGlobalParam,
3425 ugeth->exf_glbl_param_offset);
3426 out_be32(&ugeth->p_exf_glbl_param->l2pcdptr,
3427 (u32) ug_info->extendedFilteringChainPointer);
3428
3429 } else { /* initialize 82xx style address filtering */
3430
3431 /* Init individual address recognition registers to disabled */
3432
3433 for (j = 0; j < NUM_OF_PADDRS; j++)
3434 ugeth_82xx_filtering_clear_addr_in_paddr(ugeth, (u8) j);
3435
3436 /* Create CQs for hash tables */
3437 if (ug_info->maxGroupAddrInHash > 0) {
3438 INIT_LIST_HEAD(&ugeth->group_hash_q);
3439 }
3440 if (ug_info->maxIndAddrInHash > 0) {
3441 INIT_LIST_HEAD(&ugeth->ind_hash_q);
3442 }
3443 p_82xx_addr_filt =
3444 (ucc_geth_82xx_address_filtering_pram_t *) ugeth->
3445 p_rx_glbl_pram->addressfiltering;
3446
3447 ugeth_82xx_filtering_clear_all_addr_in_hash(ugeth,
3448 ENET_ADDR_TYPE_GROUP);
3449 ugeth_82xx_filtering_clear_all_addr_in_hash(ugeth,
3450 ENET_ADDR_TYPE_INDIVIDUAL);
3451 }
3452
3453 /*
3454 * Initialize UCC at QE level
3455 */
3456
3457 command = QE_INIT_TX_RX;
3458
3459 /* Allocate shadow InitEnet command parameter structure.
3460 * This is needed because after the InitEnet command is executed,
3461 * the structure in DPRAM is released, because DPRAM is a premium
3462 * resource.
3463 * This shadow structure keeps a copy of what was done so that the
3464 * allocated resources can be released when the channel is freed.
3465 */
3466 if (!(ugeth->p_init_enet_param_shadow =
3467 (ucc_geth_init_pram_t *) kmalloc(sizeof(ucc_geth_init_pram_t),
3468 GFP_KERNEL))) {
3469 ugeth_err
3470 ("%s: Can not allocate memory for"
3471 " p_UccInitEnetParamShadows.", __FUNCTION__);
3472 ucc_geth_memclean(ugeth);
3473 return -ENOMEM;
3474 }
3475 /* Zero out *p_init_enet_param_shadow */
3476 memset((char *)ugeth->p_init_enet_param_shadow,
3477 0, sizeof(ucc_geth_init_pram_t));
3478
3479 /* Fill shadow InitEnet command parameter structure */
3480
3481 ugeth->p_init_enet_param_shadow->resinit1 =
3482 ENET_INIT_PARAM_MAGIC_RES_INIT1;
3483 ugeth->p_init_enet_param_shadow->resinit2 =
3484 ENET_INIT_PARAM_MAGIC_RES_INIT2;
3485 ugeth->p_init_enet_param_shadow->resinit3 =
3486 ENET_INIT_PARAM_MAGIC_RES_INIT3;
3487 ugeth->p_init_enet_param_shadow->resinit4 =
3488 ENET_INIT_PARAM_MAGIC_RES_INIT4;
3489 ugeth->p_init_enet_param_shadow->resinit5 =
3490 ENET_INIT_PARAM_MAGIC_RES_INIT5;
3491 ugeth->p_init_enet_param_shadow->rgftgfrxglobal |=
3492 ((u32) ug_info->numThreadsRx) << ENET_INIT_PARAM_RGF_SHIFT;
3493 ugeth->p_init_enet_param_shadow->rgftgfrxglobal |=
3494 ((u32) ug_info->numThreadsTx) << ENET_INIT_PARAM_TGF_SHIFT;
3495
3496 ugeth->p_init_enet_param_shadow->rgftgfrxglobal |=
3497 ugeth->rx_glbl_pram_offset | ug_info->riscRx;
3498 if ((ug_info->largestexternallookupkeysize !=
3499 QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_NONE)
3500 && (ug_info->largestexternallookupkeysize !=
3501 QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_8_BYTES)
3502 && (ug_info->largestexternallookupkeysize !=
3503 QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_16_BYTES)) {
3504 ugeth_err("%s: Invalid largest External Lookup Key Size.",
3505 __FUNCTION__);
3506 ucc_geth_memclean(ugeth);
3507 return -EINVAL;
3508 }
3509 ugeth->p_init_enet_param_shadow->largestexternallookupkeysize =
3510 ug_info->largestexternallookupkeysize;
3511 size = sizeof(ucc_geth_thread_rx_pram_t);
3512 if (ug_info->rxExtendedFiltering) {
3513 size += THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING;
3514 if (ug_info->largestexternallookupkeysize ==
3515 QE_FLTR_TABLE_LOOKUP_KEY_SIZE_8_BYTES)
3516 size +=
3517 THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_8;
3518 if (ug_info->largestexternallookupkeysize ==
3519 QE_FLTR_TABLE_LOOKUP_KEY_SIZE_16_BYTES)
3520 size +=
3521 THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_16;
3522 }
3523
3524 if ((ret_val = fill_init_enet_entries(ugeth, &(ugeth->
3525 p_init_enet_param_shadow->rxthread[0]),
3526 (u8) (numThreadsRxNumerical + 1)
3527 /* Rx needs one extra for terminator */
3528 , size, UCC_GETH_THREAD_RX_PRAM_ALIGNMENT,
3529 ug_info->riscRx, 1)) != 0) {
3530 ugeth_err("%s: Can not fill p_init_enet_param_shadow.",
3531 __FUNCTION__);
3532 ucc_geth_memclean(ugeth);
3533 return ret_val;
3534 }
3535
3536 ugeth->p_init_enet_param_shadow->txglobal =
3537 ugeth->tx_glbl_pram_offset | ug_info->riscTx;
3538 if ((ret_val =
3539 fill_init_enet_entries(ugeth,
3540 &(ugeth->p_init_enet_param_shadow->
3541 txthread[0]), numThreadsTxNumerical,
3542 sizeof(ucc_geth_thread_tx_pram_t),
3543 UCC_GETH_THREAD_TX_PRAM_ALIGNMENT,
3544 ug_info->riscTx, 0)) != 0) {
3545 ugeth_err("%s: Can not fill p_init_enet_param_shadow.",
3546 __FUNCTION__);
3547 ucc_geth_memclean(ugeth);
3548 return ret_val;
3549 }
3550
3551 /* Load Rx bds with buffers */
3552 for (i = 0; i < ug_info->numQueuesRx; i++) {
3553 if ((ret_val = rx_bd_buffer_set(ugeth, (u8) i)) != 0) {
3554 ugeth_err("%s: Can not fill Rx bds with buffers.",
3555 __FUNCTION__);
3556 ucc_geth_memclean(ugeth);
3557 return ret_val;
3558 }
3559 }
3560
3561 /* Allocate InitEnet command parameter structure */
3562 init_enet_pram_offset = qe_muram_alloc(sizeof(ucc_geth_init_pram_t), 4);
3563 if (IS_MURAM_ERR(init_enet_pram_offset)) {
3564 ugeth_err
3565 ("%s: Can not allocate DPRAM memory for p_init_enet_pram.",
3566 __FUNCTION__);
3567 ucc_geth_memclean(ugeth);
3568 return -ENOMEM;
3569 }
3570 p_init_enet_pram =
3571 (ucc_geth_init_pram_t *) qe_muram_addr(init_enet_pram_offset);
3572
3573 /* Copy shadow InitEnet command parameter structure into PRAM */
3574 p_init_enet_pram->resinit1 = ugeth->p_init_enet_param_shadow->resinit1;
3575 p_init_enet_pram->resinit2 = ugeth->p_init_enet_param_shadow->resinit2;
3576 p_init_enet_pram->resinit3 = ugeth->p_init_enet_param_shadow->resinit3;
3577 p_init_enet_pram->resinit4 = ugeth->p_init_enet_param_shadow->resinit4;
3578 out_be16(&p_init_enet_pram->resinit5,
3579 ugeth->p_init_enet_param_shadow->resinit5);
3580 p_init_enet_pram->largestexternallookupkeysize =
3581 ugeth->p_init_enet_param_shadow->largestexternallookupkeysize;
3582 out_be32(&p_init_enet_pram->rgftgfrxglobal,
3583 ugeth->p_init_enet_param_shadow->rgftgfrxglobal);
3584 for (i = 0; i < ENET_INIT_PARAM_MAX_ENTRIES_RX; i++)
3585 out_be32(&p_init_enet_pram->rxthread[i],
3586 ugeth->p_init_enet_param_shadow->rxthread[i]);
3587 out_be32(&p_init_enet_pram->txglobal,
3588 ugeth->p_init_enet_param_shadow->txglobal);
3589 for (i = 0; i < ENET_INIT_PARAM_MAX_ENTRIES_TX; i++)
3590 out_be32(&p_init_enet_pram->txthread[i],
3591 ugeth->p_init_enet_param_shadow->txthread[i]);
3592
3593 /* Issue QE command */
3594 cecr_subblock =
3595 ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);
3596 qe_issue_cmd(command, cecr_subblock, (u8) QE_CR_PROTOCOL_ETHERNET,
3597 init_enet_pram_offset);
3598
3599 /* Free InitEnet command parameter */
3600 qe_muram_free(init_enet_pram_offset);
3601
3602 return 0;
3603}
3604
3605/* returns a net_device_stats structure pointer */
3606static struct net_device_stats *ucc_geth_get_stats(struct net_device *dev)
3607{
3608 ucc_geth_private_t *ugeth = netdev_priv(dev);
3609
3610 return &(ugeth->stats);
3611}
3612
3613/* ucc_geth_timeout gets called when a packet has not been
3614 * transmitted after a set amount of time.
3615 * For now, assume that clearing out all the structures, and
3616 * starting over will fix the problem. */
3617static void ucc_geth_timeout(struct net_device *dev)
3618{
3619 ucc_geth_private_t *ugeth = netdev_priv(dev);
3620
3621 ugeth_vdbg("%s: IN", __FUNCTION__);
3622
3623 ugeth->stats.tx_errors++;
3624
3625 ugeth_dump_regs(ugeth);
3626
3627 if (dev->flags & IFF_UP) {
3628 ucc_geth_stop(ugeth);
3629 ucc_geth_startup(ugeth);
3630 }
3631
3632 netif_schedule(dev);
3633}
3634
3635/* This is called by the kernel when a frame is ready for transmission. */
3636/* It is pointed to by the dev->hard_start_xmit function pointer */
3637static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev)
3638{
3639 ucc_geth_private_t *ugeth = netdev_priv(dev);
3640 u8 *bd; /* BD pointer */
3641 u32 bd_status;
3642 u8 txQ = 0;
3643
3644 ugeth_vdbg("%s: IN", __FUNCTION__);
3645
3646 spin_lock_irq(&ugeth->lock);
3647
3648 ugeth->stats.tx_bytes += skb->len;
3649
3650 /* Start from the next BD that should be filled */
3651 bd = ugeth->txBd[txQ];
3652 bd_status = BD_STATUS_AND_LENGTH(bd);
3653 /* Save the skb pointer so we can free it later */
3654 ugeth->tx_skbuff[txQ][ugeth->skb_curtx[txQ]] = skb;
3655
3656 /* Update the current skb pointer (wrapping if this was the last) */
3657 ugeth->skb_curtx[txQ] =
3658 (ugeth->skb_curtx[txQ] +
3659 1) & TX_RING_MOD_MASK(ugeth->ug_info->bdRingLenTx[txQ]);
3660
3661 /* set up the buffer descriptor */
3662 BD_BUFFER_SET(bd,
3663 dma_map_single(NULL, skb->data, skb->len, DMA_TO_DEVICE));
3664
3665 //printk(KERN_DEBUG"skb->data is 0x%x\n",skb->data);
3666
3667 bd_status = (bd_status & T_W) | T_R | T_I | T_L | skb->len;
3668
3669 BD_STATUS_AND_LENGTH_SET(bd, bd_status);
3670
3671 dev->trans_start = jiffies;
3672
3673 /* Move to next BD in the ring */
3674 if (!(bd_status & T_W))
3675 ugeth->txBd[txQ] = bd + UCC_GETH_SIZE_OF_BD;
3676 else
3677 ugeth->txBd[txQ] = ugeth->p_tx_bd_ring[txQ];
3678
3679 /* If the next BD still needs to be cleaned up, then the bds
3680 are full. We need to tell the kernel to stop sending us stuff. */
3681 if (bd == ugeth->confBd[txQ]) {
3682 if (!netif_queue_stopped(dev))
3683 netif_stop_queue(dev);
3684 }
3685
3686 if (ugeth->p_scheduler) {
3687 ugeth->cpucount[txQ]++;
3688 /* Indicate to QE that there are more Tx bds ready for
3689 transmission */
3690 /* This is done by writing a running counter of the bd
3691 count to the scheduler PRAM. */
3692 out_be16(ugeth->p_cpucount[txQ], ugeth->cpucount[txQ]);
3693 }
3694
3695 spin_unlock_irq(&ugeth->lock);
3696
3697 return 0;
3698}
3699
3700static int ucc_geth_rx(ucc_geth_private_t *ugeth, u8 rxQ, int rx_work_limit)
3701{
3702 struct sk_buff *skb;
3703 u8 *bd;
3704 u16 length, howmany = 0;
3705 u32 bd_status;
3706 u8 *bdBuffer;
3707
3708 ugeth_vdbg("%s: IN", __FUNCTION__);
3709
3710 spin_lock(&ugeth->lock);
3711 /* collect received buffers */
3712 bd = ugeth->rxBd[rxQ];
3713
3714 bd_status = BD_STATUS_AND_LENGTH(bd);
3715
3716 /* while there are received buffers and BD is full (~R_E) */
3717 while (!((bd_status & (R_E)) || (--rx_work_limit < 0))) {
3718 bdBuffer = (u8 *) BD_BUFFER(bd);
3719 length = (u16) ((bd_status & BD_LENGTH_MASK) - 4);
3720 skb = ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]];
3721
3722 /* determine whether buffer is first, last, first and last
3723 (single buffer frame) or middle (not first and not last) */
3724 if (!skb ||
3725 (!(bd_status & (R_F | R_L))) ||
3726 (bd_status & R_ERRORS_FATAL)) {
3727 ugeth_vdbg("%s, %d: ERROR!!! skb - 0x%08x",
3728 __FUNCTION__, __LINE__, (u32) skb);
3729 if (skb)
3730 dev_kfree_skb_any(skb);
3731
3732 ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]] = NULL;
3733 ugeth->stats.rx_dropped++;
3734 } else {
3735 ugeth->stats.rx_packets++;
3736 howmany++;
3737
3738 /* Prep the skb for the packet */
3739 skb_put(skb, length);
3740
3741 /* Tell the skb what kind of packet this is */
3742 skb->protocol = eth_type_trans(skb, ugeth->dev);
3743
3744 ugeth->stats.rx_bytes += length;
3745 /* Send the packet up the stack */
3746#ifdef CONFIG_UGETH_NAPI
3747 netif_receive_skb(skb);
3748#else
3749 netif_rx(skb);
3750#endif /* CONFIG_UGETH_NAPI */
3751 }
3752
3753 ugeth->dev->last_rx = jiffies;
3754
3755 skb = get_new_skb(ugeth, bd);
3756 if (!skb) {
3757 ugeth_warn("%s: No Rx Data Buffer", __FUNCTION__);
3758 spin_unlock(&ugeth->lock);
3759 ugeth->stats.rx_dropped++;
3760 break;
3761 }
3762
3763 ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]] = skb;
3764
3765 /* update to point at the next skb */
3766 ugeth->skb_currx[rxQ] =
3767 (ugeth->skb_currx[rxQ] +
3768 1) & RX_RING_MOD_MASK(ugeth->ug_info->bdRingLenRx[rxQ]);
3769
3770 if (bd_status & R_W)
3771 bd = ugeth->p_rx_bd_ring[rxQ];
3772 else
3773 bd += UCC_GETH_SIZE_OF_BD;
3774
3775 bd_status = BD_STATUS_AND_LENGTH(bd);
3776 }
3777
3778 ugeth->rxBd[rxQ] = bd;
3779 spin_unlock(&ugeth->lock);
3780 return howmany;
3781}
3782
3783static int ucc_geth_tx(struct net_device *dev, u8 txQ)
3784{
3785 /* Start from the next BD that should be filled */
3786 ucc_geth_private_t *ugeth = netdev_priv(dev);
3787 u8 *bd; /* BD pointer */
3788 u32 bd_status;
3789
3790 bd = ugeth->confBd[txQ];
3791 bd_status = BD_STATUS_AND_LENGTH(bd);
3792
3793 /* Normal processing. */
3794 while ((bd_status & T_R) == 0) {
3795 /* BD contains already transmitted buffer. */
3796 /* Handle the transmitted buffer and release */
3797 /* the BD to be used with the current frame */
3798
3799 if ((bd == ugeth->txBd[txQ]) && (netif_queue_stopped(dev) == 0))
3800 break;
3801
3802 ugeth->stats.tx_packets++;
3803
3804 /* Free the sk buffer associated with this TxBD */
3805 dev_kfree_skb_irq(ugeth->
3806 tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]]);
3807 ugeth->tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]] = NULL;
3808 ugeth->skb_dirtytx[txQ] =
3809 (ugeth->skb_dirtytx[txQ] +
3810 1) & TX_RING_MOD_MASK(ugeth->ug_info->bdRingLenTx[txQ]);
3811
3812 /* We freed a buffer, so now we can restart transmission */
3813 if (netif_queue_stopped(dev))
3814 netif_wake_queue(dev);
3815
3816 /* Advance the confirmation BD pointer */
3817 if (!(bd_status & T_W))
3818 ugeth->confBd[txQ] += UCC_GETH_SIZE_OF_BD;
3819 else
3820 ugeth->confBd[txQ] = ugeth->p_tx_bd_ring[txQ];
3821 }
3822 return 0;
3823}
3824
3825#ifdef CONFIG_UGETH_NAPI
3826static int ucc_geth_poll(struct net_device *dev, int *budget)
3827{
3828 ucc_geth_private_t *ugeth = netdev_priv(dev);
3829 int howmany;
3830 int rx_work_limit = *budget;
3831 u8 rxQ = 0;
3832
3833 if (rx_work_limit > dev->quota)
3834 rx_work_limit = dev->quota;
3835
3836 howmany = ucc_geth_rx(ugeth, rxQ, rx_work_limit);
3837
3838 dev->quota -= howmany;
3839 rx_work_limit -= howmany;
3840 *budget -= howmany;
3841
3842 if (rx_work_limit >= 0)
3843 netif_rx_complete(dev);
3844
3845 return (rx_work_limit < 0) ? 1 : 0;
3846}
3847#endif /* CONFIG_UGETH_NAPI */
3848
3849static irqreturn_t ucc_geth_irq_handler(int irq, void *info,
3850 struct pt_regs *regs)
3851{
3852 struct net_device *dev = (struct net_device *)info;
3853 ucc_geth_private_t *ugeth = netdev_priv(dev);
3854 ucc_fast_private_t *uccf;
3855 ucc_geth_info_t *ug_info;
3856 register u32 ucce = 0;
3857 register u32 bit_mask = UCCE_RXBF_SINGLE_MASK;
3858 register u32 tx_mask = UCCE_TXBF_SINGLE_MASK;
3859 register u8 i;
3860
3861 ugeth_vdbg("%s: IN", __FUNCTION__);
3862
3863 if (!ugeth)
3864 return IRQ_NONE;
3865
3866 uccf = ugeth->uccf;
3867 ug_info = ugeth->ug_info;
3868
3869 do {
3870 ucce |= (u32) (in_be32(uccf->p_ucce) & in_be32(uccf->p_uccm));
3871
3872 /* clear event bits for next time */
3873 /* Side effect here is to mask ucce variable
3874 for future processing below. */
3875 out_be32(uccf->p_ucce, ucce); /* Clear with ones,
3876 but only bits in UCCM */
3877
3878 /* We ignore Tx interrupts because Tx confirmation is
3879 done inside Tx routine */
3880
3881 for (i = 0; i < ug_info->numQueuesRx; i++) {
3882 if (ucce & bit_mask)
3883 ucc_geth_rx(ugeth, i,
3884 (int)ugeth->ug_info->
3885 bdRingLenRx[i]);
3886 ucce &= ~bit_mask;
3887 bit_mask <<= 1;
3888 }
3889
3890 for (i = 0; i < ug_info->numQueuesTx; i++) {
3891 if (ucce & tx_mask)
3892 ucc_geth_tx(dev, i);
3893 ucce &= ~tx_mask;
3894 tx_mask <<= 1;
3895 }
3896
3897 /* Exceptions */
3898 if (ucce & UCCE_BSY) {
3899 ugeth_vdbg("Got BUSY irq!!!!");
3900 ugeth->stats.rx_errors++;
3901 ucce &= ~UCCE_BSY;
3902 }
3903 if (ucce & UCCE_OTHER) {
3904 ugeth_vdbg("Got frame with error (ucce - 0x%08x)!!!!",
3905 ucce);
3906 ugeth->stats.rx_errors++;
3907 ucce &= ~ucce;
3908 }
3909 }
3910 while (ucce);
3911
3912 return IRQ_HANDLED;
3913}
3914
3915static irqreturn_t phy_interrupt(int irq, void *dev_id, struct pt_regs *regs)
3916{
3917 struct net_device *dev = (struct net_device *)dev_id;
3918 ucc_geth_private_t *ugeth = netdev_priv(dev);
3919
3920 ugeth_vdbg("%s: IN", __FUNCTION__);
3921
3922 /* Clear the interrupt */
3923 mii_clear_phy_interrupt(ugeth->mii_info);
3924
3925 /* Disable PHY interrupts */
3926 mii_configure_phy_interrupt(ugeth->mii_info, MII_INTERRUPT_DISABLED);
3927
3928 /* Schedule the phy change */
3929 schedule_work(&ugeth->tq);
3930
3931 return IRQ_HANDLED;
3932}
3933
3934/* Scheduled by the phy_interrupt/timer to handle PHY changes */
3935static void ugeth_phy_change(void *data)
3936{
3937 struct net_device *dev = (struct net_device *)data;
3938 ucc_geth_private_t *ugeth = netdev_priv(dev);
3939 ucc_geth_t *ug_regs;
3940 int result = 0;
3941
3942 ugeth_vdbg("%s: IN", __FUNCTION__);
3943
3944 ug_regs = ugeth->ug_regs;
3945
3946 /* Delay to give the PHY a chance to change the
3947 * register state */
3948 msleep(1);
3949
3950 /* Update the link, speed, duplex */
3951 result = ugeth->mii_info->phyinfo->read_status(ugeth->mii_info);
3952
3953 /* Adjust the known status as long as the link
3954 * isn't still coming up */
3955 if ((0 == result) || (ugeth->mii_info->link == 0))
3956 adjust_link(dev);
3957
3958 /* Reenable interrupts, if needed */
3959 if (ugeth->ug_info->board_flags & FSL_UGETH_BRD_HAS_PHY_INTR)
3960 mii_configure_phy_interrupt(ugeth->mii_info,
3961 MII_INTERRUPT_ENABLED);
3962}
3963
3964/* Called every so often on systems that don't interrupt
3965 * the core for PHY changes */
3966static void ugeth_phy_timer(unsigned long data)
3967{
3968 struct net_device *dev = (struct net_device *)data;
3969 ucc_geth_private_t *ugeth = netdev_priv(dev);
3970
3971 schedule_work(&ugeth->tq);
3972
3973 mod_timer(&ugeth->phy_info_timer, jiffies + PHY_CHANGE_TIME * HZ);
3974}
3975
3976/* Keep trying aneg for some time
3977 * If, after UGETH_AN_TIMEOUT seconds, it has not
3978 * finished, we switch to forced.
3979 * Either way, once the process has completed, we either
3980 * request the interrupt, or switch the timer over to
3981 * using ugeth_phy_timer to check status */
3982static void ugeth_phy_startup_timer(unsigned long data)
3983{
3984 struct ugeth_mii_info *mii_info = (struct ugeth_mii_info *)data;
3985 ucc_geth_private_t *ugeth = netdev_priv(mii_info->dev);
3986 static int secondary = UGETH_AN_TIMEOUT;
3987 int result;
3988
3989 /* Configure the Auto-negotiation */
3990 result = mii_info->phyinfo->config_aneg(mii_info);
3991
3992 /* If autonegotiation failed to start, and
3993 * we haven't timed out, reset the timer, and return */
3994 if (result && secondary--) {
3995 mod_timer(&ugeth->phy_info_timer, jiffies + HZ);
3996 return;
3997 } else if (result) {
3998 /* Couldn't start autonegotiation.
3999 * Try switching to forced */
4000 mii_info->autoneg = 0;
4001 result = mii_info->phyinfo->config_aneg(mii_info);
4002
4003 /* Forcing failed! Give up */
4004 if (result) {
4005 ugeth_err("%s: Forcing failed!", mii_info->dev->name);
4006 return;
4007 }
4008 }
4009
4010 /* Kill the timer so it can be restarted */
4011 del_timer_sync(&ugeth->phy_info_timer);
4012
4013 /* Grab the PHY interrupt, if necessary/possible */
4014 if (ugeth->ug_info->board_flags & FSL_UGETH_BRD_HAS_PHY_INTR) {
4015 if (request_irq(ugeth->ug_info->phy_interrupt,
4016 phy_interrupt,
4017 SA_SHIRQ, "phy_interrupt", mii_info->dev) < 0) {
4018 ugeth_err("%s: Can't get IRQ %d (PHY)",
4019 mii_info->dev->name,
4020 ugeth->ug_info->phy_interrupt);
4021 } else {
4022 mii_configure_phy_interrupt(ugeth->mii_info,
4023 MII_INTERRUPT_ENABLED);
4024 return;
4025 }
4026 }
4027
4028 /* Start the timer again, this time in order to
4029 * handle a change in status */
4030 init_timer(&ugeth->phy_info_timer);
4031 ugeth->phy_info_timer.function = &ugeth_phy_timer;
4032 ugeth->phy_info_timer.data = (unsigned long)mii_info->dev;
4033 mod_timer(&ugeth->phy_info_timer, jiffies + PHY_CHANGE_TIME * HZ);
4034}
4035
4036/* Called when something needs to use the ethernet device */
4037/* Returns 0 for success. */
4038static int ucc_geth_open(struct net_device *dev)
4039{
4040 ucc_geth_private_t *ugeth = netdev_priv(dev);
4041 int err;
4042
4043 ugeth_vdbg("%s: IN", __FUNCTION__);
4044
4045 /* Test station address */
4046 if (dev->dev_addr[0] & ENET_GROUP_ADDR) {
4047 ugeth_err("%s: Multicast address used for station address"
4048 " - is this what you wanted?", __FUNCTION__);
4049 return -EINVAL;
4050 }
4051
4052 err = ucc_geth_startup(ugeth);
4053 if (err) {
4054 ugeth_err("%s: Cannot configure net device, aborting.",
4055 dev->name);
4056 return err;
4057 }
4058
4059 err = adjust_enet_interface(ugeth);
4060 if (err) {
4061 ugeth_err("%s: Cannot configure net device, aborting.",
4062 dev->name);
4063 return err;
4064 }
4065
4066 /* Set MACSTNADDR1, MACSTNADDR2 */
4067 /* For more details see the hardware spec. */
4068 init_mac_station_addr_regs(dev->dev_addr[0],
4069 dev->dev_addr[1],
4070 dev->dev_addr[2],
4071 dev->dev_addr[3],
4072 dev->dev_addr[4],
4073 dev->dev_addr[5],
4074 &ugeth->ug_regs->macstnaddr1,
4075 &ugeth->ug_regs->macstnaddr2);
4076
4077 err = init_phy(dev);
4078 if (err) {
4079 ugeth_err("%s: Cannot initialize PHY, aborting.", dev->name);
4080 return err;
4081 }
4082#ifndef CONFIG_UGETH_NAPI
4083 err =
4084 request_irq(ugeth->ug_info->uf_info.irq, ucc_geth_irq_handler, 0,
4085 "UCC Geth", dev);
4086 if (err) {
4087 ugeth_err("%s: Cannot get IRQ for net device, aborting.",
4088 dev->name);
4089 ucc_geth_stop(ugeth);
4090 return err;
4091 }
4092#endif /* CONFIG_UGETH_NAPI */
4093
4094 /* Set up the PHY change work queue */
4095 INIT_WORK(&ugeth->tq, ugeth_phy_change, dev);
4096
4097 init_timer(&ugeth->phy_info_timer);
4098 ugeth->phy_info_timer.function = &ugeth_phy_startup_timer;
4099 ugeth->phy_info_timer.data = (unsigned long)ugeth->mii_info;
4100 mod_timer(&ugeth->phy_info_timer, jiffies + HZ);
4101
4102 err = ugeth_enable(ugeth, COMM_DIR_RX_AND_TX);
4103 if (err) {
4104 ugeth_err("%s: Cannot enable net device, aborting.", dev->name);
4105 ucc_geth_stop(ugeth);
4106 return err;
4107 }
4108
4109 netif_start_queue(dev);
4110
4111 return err;
4112}
4113
4114/* Stops the kernel queue, and halts the controller */
4115static int ucc_geth_close(struct net_device *dev)
4116{
4117 ucc_geth_private_t *ugeth = netdev_priv(dev);
4118
4119 ugeth_vdbg("%s: IN", __FUNCTION__);
4120
4121 ucc_geth_stop(ugeth);
4122
4123 /* Shutdown the PHY */
4124 if (ugeth->mii_info->phyinfo->close)
4125 ugeth->mii_info->phyinfo->close(ugeth->mii_info);
4126
4127 kfree(ugeth->mii_info);
4128
4129 netif_stop_queue(dev);
4130
4131 return 0;
4132}
4133
4134struct ethtool_ops ucc_geth_ethtool_ops = {
4135 .get_settings = NULL,
4136 .get_drvinfo = NULL,
4137 .get_regs_len = NULL,
4138 .get_regs = NULL,
4139 .get_link = NULL,
4140 .get_coalesce = NULL,
4141 .set_coalesce = NULL,
4142 .get_ringparam = NULL,
4143 .set_ringparam = NULL,
4144 .get_strings = NULL,
4145 .get_stats_count = NULL,
4146 .get_ethtool_stats = NULL,
4147};
4148
4149static int ucc_geth_probe(struct device *device)
4150{
4151 struct platform_device *pdev = to_platform_device(device);
4152 struct ucc_geth_platform_data *ugeth_pdata;
4153 struct net_device *dev = NULL;
4154 struct ucc_geth_private *ugeth = NULL;
4155 struct ucc_geth_info *ug_info;
4156 int err;
4157 static int mii_mng_configured = 0;
4158
4159 ugeth_vdbg("%s: IN", __FUNCTION__);
4160
4161 ugeth_pdata = (struct ucc_geth_platform_data *)pdev->dev.platform_data;
4162
4163 ug_info = &ugeth_info[pdev->id];
4164 ug_info->uf_info.ucc_num = pdev->id;
4165 ug_info->uf_info.rx_clock = ugeth_pdata->rx_clock;
4166 ug_info->uf_info.tx_clock = ugeth_pdata->tx_clock;
4167 ug_info->uf_info.regs = ugeth_pdata->phy_reg_addr;
4168 ug_info->uf_info.irq = platform_get_irq(pdev, 0);
4169 ug_info->phy_address = ugeth_pdata->phy_id;
4170 ug_info->enet_interface = ugeth_pdata->phy_interface;
4171 ug_info->board_flags = ugeth_pdata->board_flags;
4172 ug_info->phy_interrupt = ugeth_pdata->phy_interrupt;
4173
4174 printk(KERN_INFO "ucc_geth: UCC%1d at 0x%8x (irq = %d) \n",
4175 ug_info->uf_info.ucc_num + 1, ug_info->uf_info.regs,
4176 ug_info->uf_info.irq);
4177
4178 if (ug_info == NULL) {
4179 ugeth_err("%s: [%d] Missing additional data!", __FUNCTION__,
4180 pdev->id);
4181 return -ENODEV;
4182 }
4183
4184 if (!mii_mng_configured) {
4185 ucc_set_qe_mux_mii_mng(ug_info->uf_info.ucc_num);
4186 mii_mng_configured = 1;
4187 }
4188
4189 /* Create an ethernet device instance */
4190 dev = alloc_etherdev(sizeof(*ugeth));
4191
4192 if (dev == NULL)
4193 return -ENOMEM;
4194
4195 ugeth = netdev_priv(dev);
4196 spin_lock_init(&ugeth->lock);
4197
4198 dev_set_drvdata(device, dev);
4199
4200 /* Set the dev->base_addr to the UCC register region */
4201 dev->base_addr = (unsigned long)(ug_info->uf_info.regs);
4202
4203 SET_MODULE_OWNER(dev);
4204 SET_NETDEV_DEV(dev, device);
4205
4206 /* Fill in the dev structure */
4207 dev->open = ucc_geth_open;
4208 dev->hard_start_xmit = ucc_geth_start_xmit;
4209 dev->tx_timeout = ucc_geth_timeout;
4210 dev->watchdog_timeo = TX_TIMEOUT;
4211#ifdef CONFIG_UGETH_NAPI
4212 dev->poll = ucc_geth_poll;
4213 dev->weight = UCC_GETH_DEV_WEIGHT;
4214#endif /* CONFIG_UGETH_NAPI */
4215 dev->stop = ucc_geth_close;
4216 dev->get_stats = ucc_geth_get_stats;
4217// dev->change_mtu = ucc_geth_change_mtu;
4218 dev->mtu = 1500;
4219 dev->set_multicast_list = ucc_geth_set_multi;
4220 dev->ethtool_ops = &ucc_geth_ethtool_ops;
4221
4222 err = register_netdev(dev);
4223 if (err) {
4224 ugeth_err("%s: Cannot register net device, aborting.",
4225 dev->name);
4226 free_netdev(dev);
4227 return err;
4228 }
4229
4230 ugeth->ug_info = ug_info;
4231 ugeth->dev = dev;
4232 memcpy(dev->dev_addr, ugeth_pdata->mac_addr, 6);
4233
4234 return 0;
4235}
4236
4237static int ucc_geth_remove(struct device *device)
4238{
4239 struct net_device *dev = dev_get_drvdata(device);
4240 struct ucc_geth_private *ugeth = netdev_priv(dev);
4241
4242 dev_set_drvdata(device, NULL);
4243 ucc_geth_memclean(ugeth);
4244 free_netdev(dev);
4245
4246 return 0;
4247}
4248
4249/* Structure for a device driver */
4250static struct device_driver ucc_geth_driver = {
4251 .name = DRV_NAME,
4252 .bus = &platform_bus_type,
4253 .probe = ucc_geth_probe,
4254 .remove = ucc_geth_remove,
4255};
4256
4257static int __init ucc_geth_init(void)
4258{
4259 int i;
4260 printk(KERN_INFO "ucc_geth: " DRV_DESC "\n");
4261 for (i = 0; i < 8; i++)
4262 memcpy(&(ugeth_info[i]), &ugeth_primary_info,
4263 sizeof(ugeth_primary_info));
4264
4265 return driver_register(&ucc_geth_driver);
4266}
4267
4268static void __exit ucc_geth_exit(void)
4269{
4270 driver_unregister(&ucc_geth_driver);
4271}
4272
4273module_init(ucc_geth_init);
4274module_exit(ucc_geth_exit);
4275
4276MODULE_AUTHOR("Freescale Semiconductor, Inc");
4277MODULE_DESCRIPTION(DRV_DESC);
4278MODULE_LICENSE("GPL");
diff --git a/drivers/net/ucc_geth.h b/drivers/net/ucc_geth.h
new file mode 100644
index 00000000000..005965f5dd9
--- /dev/null
+++ b/drivers/net/ucc_geth.h
@@ -0,0 +1,1339 @@
1/*
2 * Copyright (C) Freescale Semiconductor, Inc. 2006. All rights reserved.
3 *
4 * Author: Shlomi Gridish <gridish@freescale.com>
5 *
6 * Description:
7 * Internal header file for UCC Gigabit Ethernet unit routines.
8 *
9 * Changelog:
10 * Jun 28, 2006 Li Yang <LeoLi@freescale.com>
11 * - Rearrange code and style fixes
12 *
13 * This program is free software; you can redistribute it and/or modify it
14 * under the terms of the GNU General Public License as published by the
15 * Free Software Foundation; either version 2 of the License, or (at your
16 * option) any later version.
17 */
18#ifndef __UCC_GETH_H__
19#define __UCC_GETH_H__
20
21#include <linux/kernel.h>
22#include <linux/list.h>
23#include <linux/fsl_devices.h>
24
25#include <asm/immap_qe.h>
26#include <asm/qe.h>
27
28#include <asm/ucc.h>
29#include <asm/ucc_fast.h>
30
31#define NUM_TX_QUEUES 8
32#define NUM_RX_QUEUES 8
33#define NUM_BDS_IN_PREFETCHED_BDS 4
34#define TX_IP_OFFSET_ENTRY_MAX 8
35#define NUM_OF_PADDRS 4
36#define ENET_INIT_PARAM_MAX_ENTRIES_RX 9
37#define ENET_INIT_PARAM_MAX_ENTRIES_TX 8
38
39typedef struct ucc_mii_mng {
40 u32 miimcfg; /* MII management configuration reg */
41 u32 miimcom; /* MII management command reg */
42 u32 miimadd; /* MII management address reg */
43 u32 miimcon; /* MII management control reg */
44 u32 miimstat; /* MII management status reg */
45 u32 miimind; /* MII management indication reg */
46} __attribute__ ((packed)) ucc_mii_mng_t;
47
48typedef struct ucc_geth {
49 ucc_fast_t uccf;
50
51 u32 maccfg1; /* mac configuration reg. 1 */
52 u32 maccfg2; /* mac configuration reg. 2 */
53 u32 ipgifg; /* interframe gap reg. */
54 u32 hafdup; /* half-duplex reg. */
55 u8 res1[0x10];
56 ucc_mii_mng_t miimng; /* MII management structure */
57 u32 ifctl; /* interface control reg */
58 u32 ifstat; /* interface status reg */
59 u32 macstnaddr1; /* mac station address part 1 reg */
60 u32 macstnaddr2; /* mac station address part 2 reg */
61 u8 res2[0x8];
62 u32 uempr; /* UCC Ethernet Mac parameter reg */
63 u32 utbipar; /* UCC tbi address reg */
64 u16 uescr; /* UCC Ethernet statistics control reg */
65 u8 res3[0x180 - 0x15A];
66 u32 tx64; /* Total number of frames (including bad
67 frames) transmitted that were exactly of the
68 minimal length (64 for untagged, 68 for
69 tagged, or with length exactly equal to the
70 parameter MINLength */
71 u32 tx127; /* Total number of frames (including bad
72 frames) transmitted that were between
73 MINLength (Including FCS length==4) and 127
74 octets */
75 u32 tx255; /* Total number of frames (including bad
76 frames) transmitted that were between 128
77 (Including FCS length==4) and 255 octets */
78 u32 rx64; /* Total number of frames received including
79 bad frames that were exactly of the minimal
80 length (64 bytes) */
81 u32 rx127; /* Total number of frames (including bad
82 frames) received that were between MINLength
83 (Including FCS length==4) and 127 octets */
84 u32 rx255; /* Total number of frames (including bad
85 frames) received that were between 128
86 (Including FCS length==4) and 255 octets */
87 u32 txok; /* Total number of octets residing in frames
88 that were involved in successful
89 transmission */
90 u16 txcf; /* Total number of PAUSE control frames
91 transmitted by this MAC */
92 u8 res4[0x2];
93 u32 tmca; /* Total number of frames that were transmitted
94 successfully with the group address bit set
95 that are not broadcast frames */
96 u32 tbca; /* Total number of frames transmitted
97 successfully that had destination address
98 field equal to the broadcast address */
99 u32 rxfok; /* Total number of frames received OK */
100 u32 rxbok; /* Total number of octets received OK */
101 u32 rbyt; /* Total number of octets received including
102 octets in bad frames. Must be implemented in
103 HW because it includes octets in frames that
104 never even reach the UCC */
105 u32 rmca; /* Total number of frames that were received
106 successfully with the group address bit set
107 that are not broadcast frames */
108 u32 rbca; /* Total number of frames received successfully
109 that had destination address equal to the
110 broadcast address */
111 u32 scar; /* Statistics carry register */
112 u32 scam; /* Statistics carry mask register */
113 u8 res5[0x200 - 0x1c4];
114} __attribute__ ((packed)) ucc_geth_t;
115
116/* UCC GETH TEMODER Register */
117#define TEMODER_TX_RMON_STATISTICS_ENABLE 0x0100 /* enable Tx statistics
118 */
119#define TEMODER_SCHEDULER_ENABLE 0x2000 /* enable scheduler */
120#define TEMODER_IP_CHECKSUM_GENERATE 0x0400 /* generate IPv4
121 checksums */
122#define TEMODER_PERFORMANCE_OPTIMIZATION_MODE1 0x0200 /* enable performance
123 optimization
124 enhancement (mode1) */
125#define TEMODER_RMON_STATISTICS 0x0100 /* enable tx statistics
126 */
127#define TEMODER_NUM_OF_QUEUES_SHIFT (15-15) /* Number of queues <<
128 shift */
129
130/* UCC GETH REMODER Register */
131#define REMODER_RX_RMON_STATISTICS_ENABLE 0x00001000 /* enable Rx
132 statistics */
133#define REMODER_RX_EXTENDED_FEATURES 0x80000000 /* enable
134 extended
135 features */
136#define REMODER_VLAN_OPERATION_TAGGED_SHIFT (31-9 ) /* vlan operation
137 tagged << shift */
138#define REMODER_VLAN_OPERATION_NON_TAGGED_SHIFT (31-10) /* vlan operation non
139 tagged << shift */
140#define REMODER_RX_QOS_MODE_SHIFT (31-15) /* rx QoS mode << shift
141 */
142#define REMODER_RMON_STATISTICS 0x00001000 /* enable rx
143 statistics */
144#define REMODER_RX_EXTENDED_FILTERING 0x00000800 /* extended
145 filtering
146 vs.
147 mpc82xx-like
148 filtering */
149#define REMODER_NUM_OF_QUEUES_SHIFT (31-23) /* Number of queues <<
150 shift */
151#define REMODER_DYNAMIC_MAX_FRAME_LENGTH 0x00000008 /* enable
152 dynamic max
153 frame length
154 */
155#define REMODER_DYNAMIC_MIN_FRAME_LENGTH 0x00000004 /* enable
156 dynamic min
157 frame length
158 */
159#define REMODER_IP_CHECKSUM_CHECK 0x00000002 /* check IPv4
160 checksums */
161#define REMODER_IP_ADDRESS_ALIGNMENT 0x00000001 /* align ip
162 address to
163 4-byte
164 boundary */
165
166/* UCC GETH Event Register */
167#define UCCE_MPD 0x80000000 /* Magic packet
168 detection */
169#define UCCE_SCAR 0x40000000
170#define UCCE_GRA 0x20000000 /* Tx graceful
171 stop
172 complete */
173#define UCCE_CBPR 0x10000000
174#define UCCE_BSY 0x08000000
175#define UCCE_RXC 0x04000000
176#define UCCE_TXC 0x02000000
177#define UCCE_TXE 0x01000000
178#define UCCE_TXB7 0x00800000
179#define UCCE_TXB6 0x00400000
180#define UCCE_TXB5 0x00200000
181#define UCCE_TXB4 0x00100000
182#define UCCE_TXB3 0x00080000
183#define UCCE_TXB2 0x00040000
184#define UCCE_TXB1 0x00020000
185#define UCCE_TXB0 0x00010000
186#define UCCE_RXB7 0x00008000
187#define UCCE_RXB6 0x00004000
188#define UCCE_RXB5 0x00002000
189#define UCCE_RXB4 0x00001000
190#define UCCE_RXB3 0x00000800
191#define UCCE_RXB2 0x00000400
192#define UCCE_RXB1 0x00000200
193#define UCCE_RXB0 0x00000100
194#define UCCE_RXF7 0x00000080
195#define UCCE_RXF6 0x00000040
196#define UCCE_RXF5 0x00000020
197#define UCCE_RXF4 0x00000010
198#define UCCE_RXF3 0x00000008
199#define UCCE_RXF2 0x00000004
200#define UCCE_RXF1 0x00000002
201#define UCCE_RXF0 0x00000001
202
203#define UCCE_RXBF_SINGLE_MASK (UCCE_RXF0)
204#define UCCE_TXBF_SINGLE_MASK (UCCE_TXB0)
205
206#define UCCE_TXB (UCCE_TXB7 | UCCE_TXB6 | UCCE_TXB5 | UCCE_TXB4 |\
207 UCCE_TXB3 | UCCE_TXB2 | UCCE_TXB1 | UCCE_TXB0)
208#define UCCE_RXB (UCCE_RXB7 | UCCE_RXB6 | UCCE_RXB5 | UCCE_RXB4 |\
209 UCCE_RXB3 | UCCE_RXB2 | UCCE_RXB1 | UCCE_RXB0)
210#define UCCE_RXF (UCCE_RXF7 | UCCE_RXF6 | UCCE_RXF5 | UCCE_RXF4 |\
211 UCCE_RXF3 | UCCE_RXF2 | UCCE_RXF1 | UCCE_RXF0)
212#define UCCE_OTHER (UCCE_SCAR | UCCE_GRA | UCCE_CBPR | UCCE_BSY |\
213 UCCE_RXC | UCCE_TXC | UCCE_TXE)
214
215/* UCC GETH UPSMR (Protocol Specific Mode Register) */
216#define UPSMR_ECM 0x04000000 /* Enable CAM
217 Miss or
218 Enable
219 Filtering
220 Miss */
221#define UPSMR_HSE 0x02000000 /* Hardware
222 Statistics
223 Enable */
224#define UPSMR_PRO 0x00400000 /* Promiscuous*/
225#define UPSMR_CAP 0x00200000 /* CAM polarity
226 */
227#define UPSMR_RSH 0x00100000 /* Receive
228 Short Frames
229 */
230#define UPSMR_RPM 0x00080000 /* Reduced Pin
231 Mode
232 interfaces */
233#define UPSMR_R10M 0x00040000 /* RGMII/RMII
234 10 Mode */
235#define UPSMR_RLPB 0x00020000 /* RMII
236 Loopback
237 Mode */
238#define UPSMR_TBIM 0x00010000 /* Ten-bit
239 Interface
240 Mode */
241#define UPSMR_RMM 0x00001000 /* RMII/RGMII
242 Mode */
243#define UPSMR_CAM 0x00000400 /* CAM Address
244 Matching */
245#define UPSMR_BRO 0x00000200 /* Broadcast
246 Address */
247#define UPSMR_RES1 0x00002000 /* Reserved
248 field - must
249 be 1 */
250
251/* UCC GETH MACCFG1 (MAC Configuration 1 Register) */
252#define MACCFG1_FLOW_RX 0x00000020 /* Flow Control
253 Rx */
254#define MACCFG1_FLOW_TX 0x00000010 /* Flow Control
255 Tx */
256#define MACCFG1_ENABLE_SYNCHED_RX 0x00000008 /* Rx Enable
257 synchronized
258 to Rx stream
259 */
260#define MACCFG1_ENABLE_RX 0x00000004 /* Enable Rx */
261#define MACCFG1_ENABLE_SYNCHED_TX 0x00000002 /* Tx Enable
262 synchronized
263 to Tx stream
264 */
265#define MACCFG1_ENABLE_TX 0x00000001 /* Enable Tx */
266
267/* UCC GETH MACCFG2 (MAC Configuration 2 Register) */
268#define MACCFG2_PREL_SHIFT (31 - 19) /* Preamble
269 Length <<
270 shift */
271#define MACCFG2_PREL_MASK 0x0000f000 /* Preamble
272 Length mask */
273#define MACCFG2_SRP 0x00000080 /* Soft Receive
274 Preamble */
275#define MACCFG2_STP 0x00000040 /* Soft
276 Transmit
277 Preamble */
278#define MACCFG2_RESERVED_1 0x00000020 /* Reserved -
279 must be set
280 to 1 */
281#define MACCFG2_LC 0x00000010 /* Length Check
282 */
283#define MACCFG2_MPE 0x00000008 /* Magic packet
284 detect */
285#define MACCFG2_FDX 0x00000001 /* Full Duplex */
286#define MACCFG2_FDX_MASK 0x00000001 /* Full Duplex
287 mask */
288#define MACCFG2_PAD_CRC 0x00000004
289#define MACCFG2_CRC_EN 0x00000002
290#define MACCFG2_PAD_AND_CRC_MODE_NONE 0x00000000 /* Neither
291 Padding
292 short frames
293 nor CRC */
294#define MACCFG2_PAD_AND_CRC_MODE_CRC_ONLY 0x00000002 /* Append CRC
295 only */
296#define MACCFG2_PAD_AND_CRC_MODE_PAD_AND_CRC 0x00000004
297#define MACCFG2_INTERFACE_MODE_NIBBLE 0x00000100 /* nibble mode
298 (MII/RMII/RGMII
299 10/100bps) */
300#define MACCFG2_INTERFACE_MODE_BYTE 0x00000200 /* byte mode
301 (GMII/TBI/RTB/RGMII
302 1000bps ) */
303#define MACCFG2_INTERFACE_MODE_MASK 0x00000300 /* mask
304 covering all
305 relevant
306 bits */
307
308/* UCC GETH IPGIFG (Inter-frame Gap / Inter-Frame Gap Register) */
309#define IPGIFG_NON_BACK_TO_BACK_IFG_PART1_SHIFT (31 - 7) /* Non
310 back-to-back
311 inter frame
312 gap part 1.
313 << shift */
314#define IPGIFG_NON_BACK_TO_BACK_IFG_PART2_SHIFT (31 - 15) /* Non
315 back-to-back
316 inter frame
317 gap part 2.
318 << shift */
319#define IPGIFG_MINIMUM_IFG_ENFORCEMENT_SHIFT (31 - 23) /* Minimum IFG
320 Enforcement
321 << shift */
322#define IPGIFG_BACK_TO_BACK_IFG_SHIFT (31 - 31) /* back-to-back
323 inter frame
324 gap << shift
325 */
326#define IPGIFG_NON_BACK_TO_BACK_IFG_PART1_MAX 127 /* Non back-to-back
327 inter frame gap part
328 1. max val */
329#define IPGIFG_NON_BACK_TO_BACK_IFG_PART2_MAX 127 /* Non back-to-back
330 inter frame gap part
331 2. max val */
332#define IPGIFG_MINIMUM_IFG_ENFORCEMENT_MAX 255 /* Minimum IFG
333 Enforcement max val */
334#define IPGIFG_BACK_TO_BACK_IFG_MAX 127 /* back-to-back inter
335 frame gap max val */
336#define IPGIFG_NBTB_CS_IPG_MASK 0x7F000000
337#define IPGIFG_NBTB_IPG_MASK 0x007F0000
338#define IPGIFG_MIN_IFG_MASK 0x0000FF00
339#define IPGIFG_BTB_IPG_MASK 0x0000007F
340
341/* UCC GETH HAFDUP (Half Duplex Register) */
342#define HALFDUP_ALT_BEB_TRUNCATION_SHIFT (31 - 11) /* Alternate
343 Binary
344 Exponential
345 Backoff
346 Truncation
347 << shift */
348#define HALFDUP_ALT_BEB_TRUNCATION_MAX 0xf /* Alternate Binary
349 Exponential Backoff
350 Truncation max val */
351#define HALFDUP_ALT_BEB 0x00080000 /* Alternate
352 Binary
353 Exponential
354 Backoff */
355#define HALFDUP_BACK_PRESSURE_NO_BACKOFF 0x00040000 /* Back
356 pressure no
357 backoff */
358#define HALFDUP_NO_BACKOFF 0x00020000 /* No Backoff */
359#define HALFDUP_EXCESSIVE_DEFER 0x00010000 /* Excessive
360 Defer */
361#define HALFDUP_MAX_RETRANSMISSION_SHIFT (31 - 19) /* Maximum
362 Retransmission
363 << shift */
364#define HALFDUP_MAX_RETRANSMISSION_MAX 0xf /* Maximum
365 Retransmission max
366 val */
367#define HALFDUP_COLLISION_WINDOW_SHIFT (31 - 31) /* Collision
368 Window <<
369 shift */
370#define HALFDUP_COLLISION_WINDOW_MAX 0x3f /* Collision Window max
371 val */
372#define HALFDUP_ALT_BEB_TR_MASK 0x00F00000
373#define HALFDUP_RETRANS_MASK 0x0000F000
374#define HALFDUP_COL_WINDOW_MASK 0x0000003F
375
376/* UCC GETH UCCS (Ethernet Status Register) */
377#define UCCS_BPR 0x02 /* Back pressure (in
378 half duplex mode) */
379#define UCCS_PAU 0x02 /* Pause state (in full
380 duplex mode) */
381#define UCCS_MPD 0x01 /* Magic Packet
382 Detected */
383
384/* UCC GETH MIIMCFG (MII Management Configuration Register) */
385#define MIIMCFG_RESET_MANAGEMENT 0x80000000 /* Reset
386 management */
387#define MIIMCFG_NO_PREAMBLE 0x00000010 /* Preamble
388 suppress */
389#define MIIMCFG_CLOCK_DIVIDE_SHIFT (31 - 31) /* clock divide
390 << shift */
391#define MIIMCFG_CLOCK_DIVIDE_MAX 0xf /* clock divide max val
392 */
393#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_2 0x00000000 /* divide by 2 */
394#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_4 0x00000001 /* divide by 4 */
395#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_6 0x00000002 /* divide by 6 */
396#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_8 0x00000003 /* divide by 8 */
397#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_10 0x00000004 /* divide by 10
398 */
399#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_14 0x00000005 /* divide by 14
400 */
401#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_16 0x00000008 /* divide by 16
402 */
403#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_20 0x00000006 /* divide by 20
404 */
405#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_28 0x00000007 /* divide by 28
406 */
407#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_32 0x00000009 /* divide by 32
408 */
409#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_48 0x0000000a /* divide by 48
410 */
411#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_64 0x0000000b /* divide by 64
412 */
413#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_80 0x0000000c /* divide by 80
414 */
415#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_112 0x0000000d /* divide by
416 112 */
417#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_160 0x0000000e /* divide by
418 160 */
419#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_224 0x0000000f /* divide by
420 224 */
421
422/* UCC GETH MIIMCOM (MII Management Command Register) */
423#define MIIMCOM_SCAN_CYCLE 0x00000002 /* Scan cycle */
424#define MIIMCOM_READ_CYCLE 0x00000001 /* Read cycle */
425
426/* UCC GETH MIIMADD (MII Management Address Register) */
427#define MIIMADD_PHY_ADDRESS_SHIFT (31 - 23) /* PHY Address
428 << shift */
429#define MIIMADD_PHY_REGISTER_SHIFT (31 - 31) /* PHY Register
430 << shift */
431
432/* UCC GETH MIIMCON (MII Management Control Register) */
433#define MIIMCON_PHY_CONTROL_SHIFT (31 - 31) /* PHY Control
434 << shift */
435#define MIIMCON_PHY_STATUS_SHIFT (31 - 31) /* PHY Status
436 << shift */
437
438/* UCC GETH MIIMIND (MII Management Indicator Register) */
439#define MIIMIND_NOT_VALID 0x00000004 /* Not valid */
440#define MIIMIND_SCAN 0x00000002 /* Scan in
441 progress */
442#define MIIMIND_BUSY 0x00000001
443
444/* UCC GETH IFSTAT (Interface Status Register) */
445#define IFSTAT_EXCESS_DEFER 0x00000200 /* Excessive
446 transmission
447 defer */
448
449/* UCC GETH MACSTNADDR1 (Station Address Part 1 Register) */
450#define MACSTNADDR1_OCTET_6_SHIFT (31 - 7) /* Station
451 address 6th
452 octet <<
453 shift */
454#define MACSTNADDR1_OCTET_5_SHIFT (31 - 15) /* Station
455 address 5th
456 octet <<
457 shift */
458#define MACSTNADDR1_OCTET_4_SHIFT (31 - 23) /* Station
459 address 4th
460 octet <<
461 shift */
462#define MACSTNADDR1_OCTET_3_SHIFT (31 - 31) /* Station
463 address 3rd
464 octet <<
465 shift */
466
467/* UCC GETH MACSTNADDR2 (Station Address Part 2 Register) */
468#define MACSTNADDR2_OCTET_2_SHIFT (31 - 7) /* Station
469 address 2nd
470 octet <<
471 shift */
472#define MACSTNADDR2_OCTET_1_SHIFT (31 - 15) /* Station
473 address 1st
474 octet <<
475 shift */
476
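For illustration only, a 6-byte station address could be packed into the two registers with the shifts above; the mapping of addr[] indices to "octet N" below is an assumption read off the comments, not taken from the hardware manual:

	/* sketch only: pack a MAC address into the station address registers */
	u32 stnaddr1 = ((u32) addr[5] << MACSTNADDR1_OCTET_6_SHIFT) |
		       ((u32) addr[4] << MACSTNADDR1_OCTET_5_SHIFT) |
		       ((u32) addr[3] << MACSTNADDR1_OCTET_4_SHIFT) |
		       ((u32) addr[2] << MACSTNADDR1_OCTET_3_SHIFT);
	u32 stnaddr2 = ((u32) addr[1] << MACSTNADDR2_OCTET_2_SHIFT) |
		       ((u32) addr[0] << MACSTNADDR2_OCTET_1_SHIFT);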
477/* UCC GETH UEMPR (Ethernet Mac Parameter Register) */
478#define UEMPR_PAUSE_TIME_VALUE_SHIFT (31 - 15) /* Pause time
479 value <<
480 shift */
481#define UEMPR_EXTENDED_PAUSE_TIME_VALUE_SHIFT (31 - 31) /* Extended
482 pause time
483 value <<
484 shift */
485
486/* UCC GETH UTBIPAR (Ten Bit Interface Physical Address Register) */
487#define UTBIPAR_PHY_ADDRESS_SHIFT (31 - 31) /* Phy address
488 << shift */
489#define UTBIPAR_PHY_ADDRESS_MASK 0x0000001f /* Phy address
490 mask */
491
492/* UCC GETH UESCR (Ethernet Statistics Control Register) */
493#define UESCR_AUTOZ 0x8000 /* Automatically zero
494 addressed
495 statistical counter
496 values */
497#define UESCR_CLRCNT 0x4000 /* Clear all statistics
498 counters */
499#define UESCR_MAXCOV_SHIFT (15 - 7) /* Max
500 Coalescing
501 Value <<
502 shift */
503#define UESCR_SCOV_SHIFT (15 - 15) /* Status
504 Coalescing
505 Value <<
506 shift */
507
508/* UCC GETH UDSR (Data Synchronization Register) */
509#define UDSR_MAGIC 0x067E
510
511typedef struct ucc_geth_thread_data_tx {
512 u8 res0[104];
513} __attribute__ ((packed)) ucc_geth_thread_data_tx_t;
514
515typedef struct ucc_geth_thread_data_rx {
516 u8 res0[40];
517} __attribute__ ((packed)) ucc_geth_thread_data_rx_t;
518
519/* Send Queue Queue-Descriptor */
520typedef struct ucc_geth_send_queue_qd {
521 u32 bd_ring_base; /* pointer to BD ring base address */
522 u8 res0[0x8];
523 u32 last_bd_completed_address;/* initialize to last entry in BD ring */
524 u8 res1[0x30];
525} __attribute__ ((packed)) ucc_geth_send_queue_qd_t;
526
527typedef struct ucc_geth_send_queue_mem_region {
528 ucc_geth_send_queue_qd_t sqqd[NUM_TX_QUEUES];
529} __attribute__ ((packed)) ucc_geth_send_queue_mem_region_t;
530
531typedef struct ucc_geth_thread_tx_pram {
532 u8 res0[64];
533} __attribute__ ((packed)) ucc_geth_thread_tx_pram_t;
534
535typedef struct ucc_geth_thread_rx_pram {
536 u8 res0[128];
537} __attribute__ ((packed)) ucc_geth_thread_rx_pram_t;
538
539#define THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING 64
540#define THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_8 64
541#define THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_16 96
542
543typedef struct ucc_geth_scheduler {
544 u16 cpucount0; /* CPU packet counter */
545 u16 cpucount1; /* CPU packet counter */
546 u16 cecount0; /* QE packet counter */
547 u16 cecount1; /* QE packet counter */
548 u16 cpucount2; /* CPU packet counter */
549 u16 cpucount3; /* CPU packet counter */
550 u16 cecount2; /* QE packet counter */
551 u16 cecount3; /* QE packet counter */
552 u16 cpucount4; /* CPU packet counter */
553 u16 cpucount5; /* CPU packet counter */
554 u16 cecount4; /* QE packet counter */
555 u16 cecount5; /* QE packet counter */
556 u16 cpucount6; /* CPU packet counter */
557 u16 cpucount7; /* CPU packet counter */
558 u16 cecount6; /* QE packet counter */
559 u16 cecount7; /* QE packet counter */
560 u32 weightstatus[NUM_TX_QUEUES]; /* accumulated weight factor */
561 u32 rtsrshadow; /* temporary variable handled by QE */
562 u32 time; /* temporary variable handled by QE */
563 u32 ttl; /* temporary variable handled by QE */
564 u32 mblinterval; /* max burst length interval */
565 u16 nortsrbytetime; /* normalized value of byte time in tsr units */
566 u8 fracsiz; /* radix 2 log value of denom. of
567 NorTSRByteTime */
568 u8 res0[1];
569 u8 strictpriorityq; /* Strict Priority Mask register */
570 u8 txasap; /* Transmit ASAP register */
571 u8 extrabw; /* Extra BandWidth register */
572 u8 oldwfqmask; /* temporary variable handled by QE */
573 u8 weightfactor[NUM_TX_QUEUES];
574 /**< weight factor for queues */
575 u32 minw; /* temporary variable handled by QE */
576 u8 res1[0x70 - 0x64];
577} __attribute__ ((packed)) ucc_geth_scheduler_t;
578
579typedef struct ucc_geth_tx_firmware_statistics_pram {
580 u32 sicoltx; /* single collision */
581 u32 mulcoltx; /* multiple collision */
582 u32 latecoltxfr; /* late collision */
583 u32 frabortduecol; /* frames aborted due to transmit collision */
584 u32 frlostinmactxer; /* frames lost due to internal MAC error
585 transmission that are not counted on any
586 other counter */
587 u32 carriersenseertx; /* carrier sense error */
588 u32 frtxok; /* frames transmitted OK */
589	u32 txfrexcessivedefer;	/* frames with deferral time greater than
590 specified threshold */
591 u32 txpkts256; /* total packets (including bad) between 256
592 and 511 octets */
593 u32 txpkts512; /* total packets (including bad) between 512
594 and 1023 octets */
595 u32 txpkts1024; /* total packets (including bad) between 1024
596 and 1518 octets */
597 u32 txpktsjumbo; /* total packets (including bad) between 1024
598 and MAXLength octets */
599} __attribute__ ((packed)) ucc_geth_tx_firmware_statistics_pram_t;
600
601typedef struct ucc_geth_rx_firmware_statistics_pram {
602 u32 frrxfcser; /* frames with crc error */
603 u32 fraligner; /* frames with alignment error */
604 u32 inrangelenrxer; /* in range length error */
605 u32 outrangelenrxer; /* out of range length error */
606 u32 frtoolong; /* frame too long */
607 u32 runt; /* runt */
608 u32 verylongevent; /* very long event */
609 u32 symbolerror; /* symbol error */
610 u32 dropbsy; /* drop because of BD not ready */
611 u8 res0[0x8];
612 u32 mismatchdrop; /* drop because of MAC filtering (e.g. address
613 or type mismatch) */
614 u32 underpkts; /* total frames less than 64 octets */
615 u32 pkts256; /* total frames (including bad) between 256 and
616 511 octets */
617 u32 pkts512; /* total frames (including bad) between 512 and
618 1023 octets */
619 u32 pkts1024; /* total frames (including bad) between 1024
620 and 1518 octets */
621 u32 pktsjumbo; /* total frames (including bad) between 1024
622 and MAXLength octets */
623 u32 frlossinmacer; /* frames lost because of internal MAC error
624 that is not counted in any other counter */
625 u32 pausefr; /* pause frames */
626 u8 res1[0x4];
627 u32 removevlan; /* total frames that had their VLAN tag removed
628 */
629 u32 replacevlan; /* total frames that had their VLAN tag
630 replaced */
631 u32 insertvlan; /* total frames that had their VLAN tag
632 inserted */
633} __attribute__ ((packed)) ucc_geth_rx_firmware_statistics_pram_t;
634
635typedef struct ucc_geth_rx_interrupt_coalescing_entry {
636 u32 interruptcoalescingmaxvalue; /* interrupt coalescing max
637 value */
638 u32 interruptcoalescingcounter; /* interrupt coalescing counter,
639 initialize to
640 interruptcoalescingmaxvalue */
641} __attribute__ ((packed)) ucc_geth_rx_interrupt_coalescing_entry_t;
642
643typedef struct ucc_geth_rx_interrupt_coalescing_table {
644 ucc_geth_rx_interrupt_coalescing_entry_t coalescingentry[NUM_RX_QUEUES];
645 /**< interrupt coalescing entry */
646} __attribute__ ((packed)) ucc_geth_rx_interrupt_coalescing_table_t;
647
648typedef struct ucc_geth_rx_prefetched_bds {
649 qe_bd_t bd[NUM_BDS_IN_PREFETCHED_BDS]; /* prefetched bd */
650} __attribute__ ((packed)) ucc_geth_rx_prefetched_bds_t;
651
652typedef struct ucc_geth_rx_bd_queues_entry {
653 u32 bdbaseptr; /* BD base pointer */
654 u32 bdptr; /* BD pointer */
655 u32 externalbdbaseptr; /* external BD base pointer */
656 u32 externalbdptr; /* external BD pointer */
657} __attribute__ ((packed)) ucc_geth_rx_bd_queues_entry_t;
658
659typedef struct ucc_geth_tx_global_pram {
660 u16 temoder;
661 u8 res0[0x38 - 0x02];
662 u32 sqptr; /* a base pointer to send queue memory region */
663 u32 schedulerbasepointer; /* a base pointer to scheduler memory
664 region */
665 u32 txrmonbaseptr; /* base pointer to Tx RMON statistics counter */
666 u32 tstate; /* tx internal state. High byte contains
667 function code */
668 u8 iphoffset[TX_IP_OFFSET_ENTRY_MAX];
669 u32 vtagtable[0x8]; /* 8 4-byte VLAN tags */
670 u32 tqptr; /* a base pointer to the Tx Queues Memory
671 Region */
672 u8 res2[0x80 - 0x74];
673} __attribute__ ((packed)) ucc_geth_tx_global_pram_t;
674
675/* structure representing Extended Filtering Global Parameters in PRAM */
676typedef struct ucc_geth_exf_global_pram {
677 u32 l2pcdptr; /* individual address filter, high */
678 u8 res0[0x10 - 0x04];
679} __attribute__ ((packed)) ucc_geth_exf_global_pram_t;
680
681typedef struct ucc_geth_rx_global_pram {
682 u32 remoder; /* ethernet mode reg. */
683 u32 rqptr; /* base pointer to the Rx Queues Memory Region*/
684 u32 res0[0x1];
685 u8 res1[0x20 - 0xC];
686	u16 typeorlen;	/* cutoff point below which the type/len field
687          is treated as a length */
688 u8 res2[0x1];
689 u8 rxgstpack; /* acknowledgement on GRACEFUL STOP RX command*/
690 u32 rxrmonbaseptr; /* base pointer to Rx RMON statistics counter */
691 u8 res3[0x30 - 0x28];
692 u32 intcoalescingptr; /* Interrupt coalescing table pointer */
693 u8 res4[0x36 - 0x34];
694 u8 rstate; /* rx internal state. High byte contains
695 function code */
696 u8 res5[0x46 - 0x37];
697 u16 mrblr; /* max receive buffer length reg. */
698 u32 rbdqptr; /* base pointer to RxBD parameter table
699 description */
700 u16 mflr; /* max frame length reg. */
701 u16 minflr; /* min frame length reg. */
702 u16 maxd1; /* max dma1 length reg. */
703 u16 maxd2; /* max dma2 length reg. */
704 u32 ecamptr; /* external CAM address */
705 u32 l2qt; /* VLAN priority mapping table. */
706 u32 l3qt[0x8]; /* IP priority mapping table. */
707 u16 vlantype; /* vlan type */
708 u16 vlantci; /* default vlan tci */
709 u8 addressfiltering[64]; /* address filtering data structure */
710 u32 exfGlobalParam; /* base address for extended filtering global
711 parameters */
712 u8 res6[0x100 - 0xC4]; /* Initialize to zero */
713} __attribute__ ((packed)) ucc_geth_rx_global_pram_t;
714
715#define GRACEFUL_STOP_ACKNOWLEDGE_RX 0x01
716
717/* structure representing InitEnet command */
718typedef struct ucc_geth_init_pram {
719 u8 resinit1;
720 u8 resinit2;
721 u8 resinit3;
722 u8 resinit4;
723 u16 resinit5;
724 u8 res1[0x1];
725 u8 largestexternallookupkeysize;
726 u32 rgftgfrxglobal;
727 u32 rxthread[ENET_INIT_PARAM_MAX_ENTRIES_RX]; /* rx threads */
728 u8 res2[0x38 - 0x30];
729 u32 txglobal; /* tx global */
730 u32 txthread[ENET_INIT_PARAM_MAX_ENTRIES_TX]; /* tx threads */
731 u8 res3[0x1];
732} __attribute__ ((packed)) ucc_geth_init_pram_t;
733
734#define ENET_INIT_PARAM_RGF_SHIFT (32 - 4)
735#define ENET_INIT_PARAM_TGF_SHIFT (32 - 8)
736
737#define ENET_INIT_PARAM_RISC_MASK 0x0000003f
738#define ENET_INIT_PARAM_PTR_MASK 0x00ffffc0
739#define ENET_INIT_PARAM_SNUM_MASK 0xff000000
740#define ENET_INIT_PARAM_SNUM_SHIFT 24
741
742#define ENET_INIT_PARAM_MAGIC_RES_INIT1 0x06
743#define ENET_INIT_PARAM_MAGIC_RES_INIT2 0x30
744#define ENET_INIT_PARAM_MAGIC_RES_INIT3 0xff
745#define ENET_INIT_PARAM_MAGIC_RES_INIT4 0x00
746#define ENET_INIT_PARAM_MAGIC_RES_INIT5 0x0400
747
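Read together, the masks above suggest that each rxthread/txthread word in ucc_geth_init_pram packs a SNUM, a RISC allocation and a parameter-RAM pointer; a hedged sketch, with snum, risc and pram_offset as hypothetical locals:

	/* sketch only: compose one InitEnet thread entry from the masks above */
	u32 entry = ((u32) snum << ENET_INIT_PARAM_SNUM_SHIFT) |
		    (pram_offset & ENET_INIT_PARAM_PTR_MASK) |
		    (risc & ENET_INIT_PARAM_RISC_MASK);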
748/* structure representing 82xx Address Filtering Enet Address in PRAM */
749typedef struct ucc_geth_82xx_enet_address {
750 u8 res1[0x2];
751 u16 h; /* address (MSB) */
752 u16 m; /* address */
753 u16 l; /* address (LSB) */
754} __attribute__ ((packed)) ucc_geth_82xx_enet_address_t;
755
756/* structure representing 82xx Address Filtering PRAM */
757typedef struct ucc_geth_82xx_address_filtering_pram {
758 u32 iaddr_h; /* individual address filter, high */
759 u32 iaddr_l; /* individual address filter, low */
760 u32 gaddr_h; /* group address filter, high */
761 u32 gaddr_l; /* group address filter, low */
762 ucc_geth_82xx_enet_address_t taddr;
763 ucc_geth_82xx_enet_address_t paddr[NUM_OF_PADDRS];
764 u8 res0[0x40 - 0x38];
765} __attribute__ ((packed)) ucc_geth_82xx_address_filtering_pram_t;
766
767/* GETH Tx firmware statistics structure, used when calling
768 UCC_GETH_GetStatistics. */
769typedef struct ucc_geth_tx_firmware_statistics {
770 u32 sicoltx; /* single collision */
771 u32 mulcoltx; /* multiple collision */
772 u32 latecoltxfr; /* late collision */
773 u32 frabortduecol; /* frames aborted due to transmit collision */
774 u32 frlostinmactxer; /* frames lost due to internal MAC error
775 transmission that are not counted on any
776 other counter */
777 u32 carriersenseertx; /* carrier sense error */
778 u32 frtxok; /* frames transmitted OK */
779	u32 txfrexcessivedefer;	/* frames with deferral time greater than
780 specified threshold */
781 u32 txpkts256; /* total packets (including bad) between 256
782 and 511 octets */
783 u32 txpkts512; /* total packets (including bad) between 512
784 and 1023 octets */
785 u32 txpkts1024; /* total packets (including bad) between 1024
786 and 1518 octets */
787 u32 txpktsjumbo; /* total packets (including bad) between 1024
788 and MAXLength octets */
789} __attribute__ ((packed)) ucc_geth_tx_firmware_statistics_t;
790
791/* GETH Rx firmware statistics structure, used when calling
792 UCC_GETH_GetStatistics. */
793typedef struct ucc_geth_rx_firmware_statistics {
794 u32 frrxfcser; /* frames with crc error */
795 u32 fraligner; /* frames with alignment error */
796 u32 inrangelenrxer; /* in range length error */
797 u32 outrangelenrxer; /* out of range length error */
798 u32 frtoolong; /* frame too long */
799 u32 runt; /* runt */
800 u32 verylongevent; /* very long event */
801 u32 symbolerror; /* symbol error */
802 u32 dropbsy; /* drop because of BD not ready */
803 u8 res0[0x8];
804 u32 mismatchdrop; /* drop because of MAC filtering (e.g. address
805 or type mismatch) */
806 u32 underpkts; /* total frames less than 64 octets */
807 u32 pkts256; /* total frames (including bad) between 256 and
808 511 octets */
809 u32 pkts512; /* total frames (including bad) between 512 and
810 1023 octets */
811 u32 pkts1024; /* total frames (including bad) between 1024
812 and 1518 octets */
813 u32 pktsjumbo; /* total frames (including bad) between 1024
814 and MAXLength octets */
815 u32 frlossinmacer; /* frames lost because of internal MAC error
816 that is not counted in any other counter */
817 u32 pausefr; /* pause frames */
818 u8 res1[0x4];
819 u32 removevlan; /* total frames that had their VLAN tag removed
820 */
821 u32 replacevlan; /* total frames that had their VLAN tag
822 replaced */
823 u32 insertvlan; /* total frames that had their VLAN tag
824 inserted */
825} __attribute__ ((packed)) ucc_geth_rx_firmware_statistics_t;
826
827/* GETH hardware statistics structure, used when calling
828 UCC_GETH_GetStatistics. */
829typedef struct ucc_geth_hardware_statistics {
830 u32 tx64; /* Total number of frames (including bad
831 frames) transmitted that were exactly of the
832          minimal length (64 for untagged, 68 for
833 tagged, or with length exactly equal to the
834          parameter MINLength) */
835 u32 tx127; /* Total number of frames (including bad
836 frames) transmitted that were between
837 MINLength (Including FCS length==4) and 127
838 octets */
839 u32 tx255; /* Total number of frames (including bad
840 frames) transmitted that were between 128
841 (Including FCS length==4) and 255 octets */
842 u32 rx64; /* Total number of frames received including
843          bad frames that were exactly of the minimal
844 length (64 bytes) */
845 u32 rx127; /* Total number of frames (including bad
846 frames) received that were between MINLength
847 (Including FCS length==4) and 127 octets */
848 u32 rx255; /* Total number of frames (including bad
849 frames) received that were between 128
850 (Including FCS length==4) and 255 octets */
851 u32 txok; /* Total number of octets residing in frames
852          that were involved in successful
853 transmission */
854 u16 txcf; /* Total number of PAUSE control frames
855 transmitted by this MAC */
856 u32 tmca; /* Total number of frames that were transmitted
857          successfully with the group address bit set
858 that are not broadcast frames */
859 u32 tbca; /* Total number of frames transmitted
860          successfully that had destination address
861 field equal to the broadcast address */
862 u32 rxfok; /* Total number of frames received OK */
863 u32 rxbok; /* Total number of octets received OK */
864 u32 rbyt; /* Total number of octets received including
865 octets in bad frames. Must be implemented in
866 HW because it includes octets in frames that
867 never even reach the UCC */
868 u32 rmca; /* Total number of frames that were received
869          successfully with the group address bit set
870 that are not broadcast frames */
871	u32 rbca;	/* Total number of frames received successfully
872 that had destination address equal to the
873 broadcast address */
874} __attribute__ ((packed)) ucc_geth_hardware_statistics_t;
875
876/* UCC GETH Tx errors returned via TxConf callback */
877#define TX_ERRORS_DEF 0x0200
878#define TX_ERRORS_EXDEF 0x0100
879#define TX_ERRORS_LC 0x0080
880#define TX_ERRORS_RL 0x0040
881#define TX_ERRORS_RC_MASK 0x003C
882#define TX_ERRORS_RC_SHIFT 2
883#define TX_ERRORS_UN 0x0002
884#define TX_ERRORS_CSL 0x0001
885
886/* UCC GETH Rx errors returned via RxStore callback */
887#define RX_ERRORS_CMR 0x0200
888#define RX_ERRORS_M 0x0100
889#define RX_ERRORS_BC 0x0080
890#define RX_ERRORS_MC 0x0040
891
892/* Transmit BD. These are in addition to values defined in uccf. */
893#define T_VID 0x003c0000 /* insert VLAN id index mask. */
894#define T_DEF (((u32) TX_ERRORS_DEF ) << 16)
895#define T_EXDEF (((u32) TX_ERRORS_EXDEF ) << 16)
896#define T_LC (((u32) TX_ERRORS_LC ) << 16)
897#define T_RL (((u32) TX_ERRORS_RL ) << 16)
898#define T_RC_MASK (((u32) TX_ERRORS_RC_MASK ) << 16)
899#define T_UN (((u32) TX_ERRORS_UN ) << 16)
900#define T_CSL (((u32) TX_ERRORS_CSL ) << 16)
901#define T_ERRORS_REPORT (T_DEF | T_EXDEF | T_LC | T_RL | T_RC_MASK \
902 | T_UN | T_CSL) /* transmit errors to report */
903
904/* Receive BD. These are in addition to values defined in uccf. */
905#define R_LG 0x00200000 /* Frame length violation. */
906#define R_NO 0x00100000 /* Non-octet aligned frame. */
907#define R_SH 0x00080000 /* Short frame. */
908#define R_CR 0x00040000 /* CRC error. */
909#define R_OV 0x00020000 /* Overrun. */
910#define R_IPCH 0x00010000 /* IP checksum check failed. */
911#define R_CMR (((u32) RX_ERRORS_CMR ) << 16)
912#define R_M (((u32) RX_ERRORS_M ) << 16)
913#define R_BC (((u32) RX_ERRORS_BC ) << 16)
914#define R_MC (((u32) RX_ERRORS_MC ) << 16)
915#define R_ERRORS_REPORT (R_CMR | R_M | R_BC | R_MC) /* receive errors to
916 report */
917#define R_ERRORS_FATAL (R_LG | R_NO | R_SH | R_CR | \
918 R_OV | R_IPCH) /* receive errors to discard */
919
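The two aggregates split receive errors into frames to discard and errors merely to report; a sketch of how a completed RxBD status word (bd_status, a hypothetical local) might be classified:

	if (bd_status & R_ERRORS_FATAL) {
		/* corrupt frame: recycle the buffer, do not pass it up */
	} else if (bd_status & R_ERRORS_REPORT) {
		/* frame is usable, but an error counter should be bumped */
	}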
920/* Alignments */
921#define UCC_GETH_RX_GLOBAL_PRAM_ALIGNMENT 256
922#define UCC_GETH_TX_GLOBAL_PRAM_ALIGNMENT 128
923#define UCC_GETH_THREAD_RX_PRAM_ALIGNMENT 128
924#define UCC_GETH_THREAD_TX_PRAM_ALIGNMENT 64
925#define UCC_GETH_THREAD_DATA_ALIGNMENT 256 /* spec gives values
926 based on num of
927 threads, but always
928 using the maximum is
929 easier */
930#define UCC_GETH_SEND_QUEUE_QUEUE_DESCRIPTOR_ALIGNMENT 32
931#define UCC_GETH_SCHEDULER_ALIGNMENT 4 /* This is a guess */
932#define UCC_GETH_TX_STATISTICS_ALIGNMENT 4 /* This is a guess */
933#define UCC_GETH_RX_STATISTICS_ALIGNMENT 4 /* This is a guess */
934#define UCC_GETH_RX_INTERRUPT_COALESCING_ALIGNMENT 4 /* This is a
935 guess */
936#define UCC_GETH_RX_BD_QUEUES_ALIGNMENT 8 /* This is a guess */
937#define UCC_GETH_RX_PREFETCHED_BDS_ALIGNMENT 128 /* This is a guess */
938#define UCC_GETH_RX_EXTENDED_FILTERING_GLOBAL_PARAMETERS_ALIGNMENT 4 /* This
939 is a
940 guess
941 */
942#define UCC_GETH_RX_BD_RING_ALIGNMENT 32
943#define UCC_GETH_TX_BD_RING_ALIGNMENT 32
944#define UCC_GETH_MRBLR_ALIGNMENT 128
945#define UCC_GETH_RX_BD_RING_SIZE_ALIGNMENT 4
946#define UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT 32
947#define UCC_GETH_RX_DATA_BUF_ALIGNMENT 64
948
949#define UCC_GETH_TAD_EF 0x80
950#define UCC_GETH_TAD_V 0x40
951#define UCC_GETH_TAD_REJ 0x20
952#define UCC_GETH_TAD_VTAG_OP_RIGHT_SHIFT 2
953#define UCC_GETH_TAD_VTAG_OP_SHIFT 6
954#define UCC_GETH_TAD_V_NON_VTAG_OP 0x20
955#define UCC_GETH_TAD_RQOS_SHIFT 0
956#define UCC_GETH_TAD_V_PRIORITY_SHIFT 5
957#define UCC_GETH_TAD_CFI 0x10
958
959#define UCC_GETH_VLAN_PRIORITY_MAX 8
960#define UCC_GETH_IP_PRIORITY_MAX 64
961#define UCC_GETH_TX_VTAG_TABLE_ENTRY_MAX 8
962#define UCC_GETH_RX_BD_RING_SIZE_MIN 8
963#define UCC_GETH_TX_BD_RING_SIZE_MIN 2
964
965#define UCC_GETH_SIZE_OF_BD QE_SIZEOF_BD
966
967/* Driver definitions */
968#define TX_BD_RING_LEN 0x10
969#define RX_BD_RING_LEN 0x10
970#define UCC_GETH_DEV_WEIGHT TX_BD_RING_LEN
971
972#define TX_RING_MOD_MASK(size) (size-1)
973#define RX_RING_MOD_MASK(size) (size-1)
974
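Because the ring lengths above are powers of two, the MOD_MASK macros wrap a ring index with a single AND; a minimal sketch:

	/* sketch: advance a Tx ring index with wrap-around */
	skb_curtx = (skb_curtx + 1) & TX_RING_MOD_MASK(TX_BD_RING_LEN);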
975#define ENET_NUM_OCTETS_PER_ADDRESS 6
976#define ENET_GROUP_ADDR 0x01 /* Group address mask
977 for ethernet
978 addresses */
979
980#define TX_TIMEOUT (1*HZ)
981#define SKB_ALLOC_TIMEOUT 100000
982#define PHY_INIT_TIMEOUT 100000
983#define PHY_CHANGE_TIME 2
984
985/* Fast Ethernet (10/100 Mbps) */
986#define UCC_GETH_URFS_INIT 512 /* Rx virtual FIFO size
987 */
988#define UCC_GETH_URFET_INIT 256 /* 1/2 urfs */
989#define UCC_GETH_URFSET_INIT 384 /* 3/4 urfs */
990#define UCC_GETH_UTFS_INIT 512 /* Tx virtual FIFO size
991 */
992#define UCC_GETH_UTFET_INIT 256 /* 1/2 utfs */
993#define UCC_GETH_UTFTT_INIT 128
994/* Gigabit Ethernet (1000 Mbps) */
995#define UCC_GETH_URFS_GIGA_INIT 4096/*2048*/ /* Rx virtual
996 FIFO size */
997#define UCC_GETH_URFET_GIGA_INIT 2048/*1024*/ /* 1/2 urfs */
998#define UCC_GETH_URFSET_GIGA_INIT 3072/*1536*/ /* 3/4 urfs */
999#define UCC_GETH_UTFS_GIGA_INIT 8192/*2048*/ /* Tx virtual
1000 FIFO size */
1001#define UCC_GETH_UTFET_GIGA_INIT 4096/*1024*/ /* 1/2 utfs */
1002#define UCC_GETH_UTFTT_GIGA_INIT 0x400/*0x40*/ /* */
1003
1004#define UCC_GETH_REMODER_INIT 0 /* bits that must be
1005 set */
1006#define UCC_GETH_TEMODER_INIT                   0xC000  /* bits that must be set */
1007#define UCC_GETH_UPSMR_INIT (UPSMR_RES1) /* Start value
1008 for this
1009 register */
1010#define UCC_GETH_MACCFG1_INIT 0
1011#define UCC_GETH_MACCFG2_INIT (MACCFG2_RESERVED_1)
1012#define UCC_GETH_MIIMCFG_MNGMNT_CLC_DIV_INIT \
1013 (MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_112)
1014
1015/* Ethernet speed */
1016typedef enum enet_speed {
1017 ENET_SPEED_10BT, /* 10 Base T */
1018 ENET_SPEED_100BT, /* 100 Base T */
1019 ENET_SPEED_1000BT /* 1000 Base T */
1020} enet_speed_e;
1021
1022/* Ethernet Address Type. */
1023typedef enum enet_addr_type {
1024 ENET_ADDR_TYPE_INDIVIDUAL,
1025 ENET_ADDR_TYPE_GROUP,
1026 ENET_ADDR_TYPE_BROADCAST
1027} enet_addr_type_e;
1028
1029/* TBI / MII Set Register */
1030typedef enum enet_tbi_mii_reg {
1031 ENET_TBI_MII_CR = 0x00, /* Control (CR ) */
1032 ENET_TBI_MII_SR = 0x01, /* Status (SR ) */
1033 ENET_TBI_MII_ANA = 0x04, /* AN advertisement (ANA ) */
1034 ENET_TBI_MII_ANLPBPA = 0x05, /* AN link partner base page ability
1035 (ANLPBPA) */
1036 ENET_TBI_MII_ANEX = 0x06, /* AN expansion (ANEX ) */
1037 ENET_TBI_MII_ANNPT = 0x07, /* AN next page transmit (ANNPT ) */
1038 ENET_TBI_MII_ANLPANP = 0x08, /* AN link partner ability next page
1039 (ANLPANP) */
1040 ENET_TBI_MII_EXST = 0x0F, /* Extended status (EXST ) */
1041 ENET_TBI_MII_JD = 0x10, /* Jitter diagnostics (JD ) */
1042 ENET_TBI_MII_TBICON = 0x11 /* TBI control (TBICON ) */
1043} enet_tbi_mii_reg_e;
1044
1045/* UCC GETH 82xx Ethernet Address Recognition Location */
1046typedef enum ucc_geth_enet_address_recognition_location {
1047 UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_STATION_ADDRESS,/* station
1048 address */
1049 UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_PADDR_FIRST, /* additional
1050 station
1051 address
1052 paddr1 */
1053 UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_PADDR2, /* additional
1054 station
1055 address
1056 paddr2 */
1057 UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_PADDR3, /* additional
1058 station
1059 address
1060 paddr3 */
1061 UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_PADDR_LAST, /* additional
1062 station
1063 address
1064 paddr4 */
1065 UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_GROUP_HASH, /* group hash */
1066 UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_INDIVIDUAL_HASH /* individual
1067 hash */
1068} ucc_geth_enet_address_recognition_location_e;
1069
1070/* UCC GETH vlan operation tagged */
1071typedef enum ucc_geth_vlan_operation_tagged {
1072 UCC_GETH_VLAN_OPERATION_TAGGED_NOP = 0x0, /* Tagged - nop */
1073 UCC_GETH_VLAN_OPERATION_TAGGED_REPLACE_VID_PORTION_OF_Q_TAG
1074 = 0x1, /* Tagged - replace vid portion of q tag */
1075 UCC_GETH_VLAN_OPERATION_TAGGED_IF_VID0_REPLACE_VID_WITH_DEFAULT_VALUE
1076 = 0x2, /* Tagged - if vid0 replace vid with default value */
1077 UCC_GETH_VLAN_OPERATION_TAGGED_EXTRACT_Q_TAG_FROM_FRAME
1078 = 0x3 /* Tagged - extract q tag from frame */
1079} ucc_geth_vlan_operation_tagged_e;
1080
1081/* UCC GETH vlan operation non-tagged */
1082typedef enum ucc_geth_vlan_operation_non_tagged {
1083 UCC_GETH_VLAN_OPERATION_NON_TAGGED_NOP = 0x0, /* Non tagged - nop */
1084 UCC_GETH_VLAN_OPERATION_NON_TAGGED_Q_TAG_INSERT = 0x1 /* Non tagged -
1085 q tag insert
1086 */
1087} ucc_geth_vlan_operation_non_tagged_e;
1088
1089/* UCC GETH Rx Quality of Service Mode */
1090typedef enum ucc_geth_qos_mode {
1091 UCC_GETH_QOS_MODE_DEFAULT = 0x0, /* default queue */
1092 UCC_GETH_QOS_MODE_QUEUE_NUM_FROM_L2_CRITERIA = 0x1, /* queue
1093 determined
1094 by L2
1095 criteria */
1096 UCC_GETH_QOS_MODE_QUEUE_NUM_FROM_L3_CRITERIA = 0x2 /* queue
1097 determined
1098 by L3
1099 criteria */
1100} ucc_geth_qos_mode_e;
1101
1102/* UCC GETH Statistics Gathering Mode - These are bit flags, 'or' them together
1103 for combined functionality */
1104typedef enum ucc_geth_statistics_gathering_mode {
1105 UCC_GETH_STATISTICS_GATHERING_MODE_NONE = 0x00000000, /* No
1106 statistics
1107 gathering */
1108 UCC_GETH_STATISTICS_GATHERING_MODE_HARDWARE = 0x00000001,/* Enable
1109 hardware
1110 statistics
1111 gathering
1112 */
1113 UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX = 0x00000004,/*Enable
1114 firmware
1115 tx
1116 statistics
1117 gathering
1118 */
1119 UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX = 0x00000008/* Enable
1120 firmware
1121 rx
1122 statistics
1123 gathering
1124 */
1125} ucc_geth_statistics_gathering_mode_e;
1126
1127/* UCC GETH Pad and CRC Mode - Note, Padding without CRC is not possible */
1128typedef enum ucc_geth_maccfg2_pad_and_crc_mode {
1129 UCC_GETH_PAD_AND_CRC_MODE_NONE
1130 = MACCFG2_PAD_AND_CRC_MODE_NONE, /* Neither Padding
1131 short frames
1132 nor CRC */
1133 UCC_GETH_PAD_AND_CRC_MODE_CRC_ONLY
1134 = MACCFG2_PAD_AND_CRC_MODE_CRC_ONLY, /* Append
1135 CRC only */
1136 UCC_GETH_PAD_AND_CRC_MODE_PAD_AND_CRC =
1137 MACCFG2_PAD_AND_CRC_MODE_PAD_AND_CRC
1138} ucc_geth_maccfg2_pad_and_crc_mode_e;
1139
1140/* UCC GETH upsmr Flow Control Mode */
1141typedef enum ucc_geth_flow_control_mode {
1142 UPSMR_AUTOMATIC_FLOW_CONTROL_MODE_NONE = 0x00000000, /* No automatic
1143 flow control
1144 */
1145 UPSMR_AUTOMATIC_FLOW_CONTROL_MODE_PAUSE_WHEN_EMERGENCY
1146 = 0x00004000 /* Send pause frame when RxFIFO reaches its
1147 emergency threshold */
1148} ucc_geth_flow_control_mode_e;
1149
1150/* UCC GETH number of threads */
1151typedef enum ucc_geth_num_of_threads {
1152 UCC_GETH_NUM_OF_THREADS_1 = 0x1, /* 1 */
1153 UCC_GETH_NUM_OF_THREADS_2 = 0x2, /* 2 */
1154 UCC_GETH_NUM_OF_THREADS_4 = 0x0, /* 4 */
1155 UCC_GETH_NUM_OF_THREADS_6 = 0x3, /* 6 */
1156 UCC_GETH_NUM_OF_THREADS_8 = 0x4 /* 8 */
1157} ucc_geth_num_of_threads_e;
1158
1159/* UCC GETH number of station addresses */
1160typedef enum ucc_geth_num_of_station_addresses {
1161 UCC_GETH_NUM_OF_STATION_ADDRESSES_1, /* 1 */
1162 UCC_GETH_NUM_OF_STATION_ADDRESSES_5 /* 5 */
1163} ucc_geth_num_of_station_addresses_e;
1164
1165typedef u8 enet_addr_t[ENET_NUM_OCTETS_PER_ADDRESS];
1166
1167/* UCC GETH 82xx Ethernet Address Container */
1168typedef struct enet_addr_container {
1169 enet_addr_t address; /* ethernet address */
1170 ucc_geth_enet_address_recognition_location_e location; /* location in
1171 82xx address
1172 recognition
1173 hardware */
1174 struct list_head node;
1175} enet_addr_container_t;
1176
1177#define ENET_ADDR_CONT_ENTRY(ptr) list_entry(ptr, enet_addr_container_t, node)
1178
1179/* UCC GETH Termination Action Descriptor (TAD) structure. */
1180typedef struct ucc_geth_tad_params {
1181 int rx_non_dynamic_extended_features_mode;
1182 int reject_frame;
1183 ucc_geth_vlan_operation_tagged_e vtag_op;
1184 ucc_geth_vlan_operation_non_tagged_e vnontag_op;
1185 ucc_geth_qos_mode_e rqos;
1186 u8 vpri;
1187 u16 vid;
1188} ucc_geth_tad_params_t;
1189
1190/* GETH protocol initialization structure */
1191typedef struct ucc_geth_info {
1192 ucc_fast_info_t uf_info;
1193 u8 numQueuesTx;
1194 u8 numQueuesRx;
1195 int ipCheckSumCheck;
1196 int ipCheckSumGenerate;
1197 int rxExtendedFiltering;
1198 u32 extendedFilteringChainPointer;
1199 u16 typeorlen;
1200 int dynamicMaxFrameLength;
1201 int dynamicMinFrameLength;
1202 u8 nonBackToBackIfgPart1;
1203 u8 nonBackToBackIfgPart2;
1204 u8 miminumInterFrameGapEnforcement;
1205 u8 backToBackInterFrameGap;
1206 int ipAddressAlignment;
1207 int lengthCheckRx;
1208 u32 mblinterval;
1209 u16 nortsrbytetime;
1210 u8 fracsiz;
1211 u8 strictpriorityq;
1212 u8 txasap;
1213 u8 extrabw;
1214 int miiPreambleSupress;
1215 u8 altBebTruncation;
1216 int altBeb;
1217 int backPressureNoBackoff;
1218 int noBackoff;
1219 int excessDefer;
1220 u8 maxRetransmission;
1221 u8 collisionWindow;
1222 int pro;
1223 int cap;
1224 int rsh;
1225 int rlpb;
1226 int cam;
1227 int bro;
1228 int ecm;
1229 int receiveFlowControl;
1230 u8 maxGroupAddrInHash;
1231 u8 maxIndAddrInHash;
1232 u8 prel;
1233 u16 maxFrameLength;
1234 u16 minFrameLength;
1235 u16 maxD1Length;
1236 u16 maxD2Length;
1237 u16 vlantype;
1238 u16 vlantci;
1239 u32 ecamptr;
1240 u32 eventRegMask;
1241 u16 pausePeriod;
1242 u16 extensionField;
1243 u8 phy_address;
1244 u32 board_flags;
1245 u32 phy_interrupt;
1246 u8 weightfactor[NUM_TX_QUEUES];
1247 u8 interruptcoalescingmaxvalue[NUM_RX_QUEUES];
1248 u8 l2qt[UCC_GETH_VLAN_PRIORITY_MAX];
1249 u8 l3qt[UCC_GETH_IP_PRIORITY_MAX];
1250 u32 vtagtable[UCC_GETH_TX_VTAG_TABLE_ENTRY_MAX];
1251 u8 iphoffset[TX_IP_OFFSET_ENTRY_MAX];
1252 u16 bdRingLenTx[NUM_TX_QUEUES];
1253 u16 bdRingLenRx[NUM_RX_QUEUES];
1254 enet_interface_e enet_interface;
1255 ucc_geth_num_of_station_addresses_e numStationAddresses;
1256 qe_fltr_largest_external_tbl_lookup_key_size_e
1257 largestexternallookupkeysize;
1258 ucc_geth_statistics_gathering_mode_e statisticsMode;
1259 ucc_geth_vlan_operation_tagged_e vlanOperationTagged;
1260 ucc_geth_vlan_operation_non_tagged_e vlanOperationNonTagged;
1261 ucc_geth_qos_mode_e rxQoSMode;
1262 ucc_geth_flow_control_mode_e aufc;
1263 ucc_geth_maccfg2_pad_and_crc_mode_e padAndCrc;
1264 ucc_geth_num_of_threads_e numThreadsTx;
1265 ucc_geth_num_of_threads_e numThreadsRx;
1266 qe_risc_allocation_e riscTx;
1267 qe_risc_allocation_e riscRx;
1268} ucc_geth_info_t;
1269
1270/* structure representing UCC GETH */
1271typedef struct ucc_geth_private {
1272 ucc_geth_info_t *ug_info;
1273 ucc_fast_private_t *uccf;
1274 struct net_device *dev;
1275 struct net_device_stats stats; /* linux network statistics */
1276 ucc_geth_t *ug_regs;
1277 ucc_geth_init_pram_t *p_init_enet_param_shadow;
1278 ucc_geth_exf_global_pram_t *p_exf_glbl_param;
1279 u32 exf_glbl_param_offset;
1280 ucc_geth_rx_global_pram_t *p_rx_glbl_pram;
1281 u32 rx_glbl_pram_offset;
1282 ucc_geth_tx_global_pram_t *p_tx_glbl_pram;
1283 u32 tx_glbl_pram_offset;
1284 ucc_geth_send_queue_mem_region_t *p_send_q_mem_reg;
1285 u32 send_q_mem_reg_offset;
1286 ucc_geth_thread_data_tx_t *p_thread_data_tx;
1287 u32 thread_dat_tx_offset;
1288 ucc_geth_thread_data_rx_t *p_thread_data_rx;
1289 u32 thread_dat_rx_offset;
1290 ucc_geth_scheduler_t *p_scheduler;
1291 u32 scheduler_offset;
1292 ucc_geth_tx_firmware_statistics_pram_t *p_tx_fw_statistics_pram;
1293 u32 tx_fw_statistics_pram_offset;
1294 ucc_geth_rx_firmware_statistics_pram_t *p_rx_fw_statistics_pram;
1295 u32 rx_fw_statistics_pram_offset;
1296 ucc_geth_rx_interrupt_coalescing_table_t *p_rx_irq_coalescing_tbl;
1297 u32 rx_irq_coalescing_tbl_offset;
1298 ucc_geth_rx_bd_queues_entry_t *p_rx_bd_qs_tbl;
1299 u32 rx_bd_qs_tbl_offset;
1300 u8 *p_tx_bd_ring[NUM_TX_QUEUES];
1301 u32 tx_bd_ring_offset[NUM_TX_QUEUES];
1302 u8 *p_rx_bd_ring[NUM_RX_QUEUES];
1303 u32 rx_bd_ring_offset[NUM_RX_QUEUES];
1304 u8 *confBd[NUM_TX_QUEUES];
1305 u8 *txBd[NUM_TX_QUEUES];
1306 u8 *rxBd[NUM_RX_QUEUES];
1307 int badFrame[NUM_RX_QUEUES];
1308 u16 cpucount[NUM_TX_QUEUES];
1309 volatile u16 *p_cpucount[NUM_TX_QUEUES];
1310 int indAddrRegUsed[NUM_OF_PADDRS];
1311 enet_addr_t paddr[NUM_OF_PADDRS];
1312 u8 numGroupAddrInHash;
1313 u8 numIndAddrInHash;
1314 u8 numIndAddrInReg;
1315 int rx_extended_features;
1316 int rx_non_dynamic_extended_features;
1317 struct list_head conf_skbs;
1318 struct list_head group_hash_q;
1319 struct list_head ind_hash_q;
1320 u32 saved_uccm;
1321 spinlock_t lock;
1322 /* pointers to arrays of skbuffs for tx and rx */
1323 struct sk_buff **tx_skbuff[NUM_TX_QUEUES];
1324 struct sk_buff **rx_skbuff[NUM_RX_QUEUES];
1325	/* indices pointing to the next free skb in skb arrays */
1326 u16 skb_curtx[NUM_TX_QUEUES];
1327 u16 skb_currx[NUM_RX_QUEUES];
1328 /* index of the first skb which hasn't been transmitted yet. */
1329 u16 skb_dirtytx[NUM_TX_QUEUES];
1330
1331 struct work_struct tq;
1332 struct timer_list phy_info_timer;
1333 struct ugeth_mii_info *mii_info;
1334 int oldspeed;
1335 int oldduplex;
1336 int oldlink;
1337} ucc_geth_private_t;
1338
1339#endif /* __UCC_GETH_H__ */
diff --git a/drivers/net/ucc_geth_phy.c b/drivers/net/ucc_geth_phy.c
new file mode 100644
index 00000000000..f91028c5386
--- /dev/null
+++ b/drivers/net/ucc_geth_phy.c
@@ -0,0 +1,801 @@
1/*
2 * Copyright (C) Freescale Semiconductor, Inc. 2006. All rights reserved.
3 *
4 * Author: Shlomi Gridish <gridish@freescale.com>
5 *
6 * Description:
7 * UCC GETH Driver -- PHY handling
8 *
9 * Changelog:
10 * Jun 28, 2006 Li Yang <LeoLi@freescale.com>
11 * - Rearrange code and style fixes
12 *
13 * This program is free software; you can redistribute it and/or modify it
14 * under the terms of the GNU General Public License as published by the
15 * Free Software Foundation; either version 2 of the License, or (at your
16 * option) any later version.
17 *
18 */
19
20#include <linux/config.h>
21#include <linux/kernel.h>
22#include <linux/sched.h>
23#include <linux/string.h>
24#include <linux/errno.h>
25#include <linux/slab.h>
26#include <linux/interrupt.h>
27#include <linux/init.h>
28#include <linux/delay.h>
29#include <linux/netdevice.h>
30#include <linux/etherdevice.h>
31#include <linux/skbuff.h>
32#include <linux/spinlock.h>
33#include <linux/mm.h>
34#include <linux/module.h>
35#include <linux/version.h>
36#include <linux/crc32.h>
37#include <linux/mii.h>
38#include <linux/ethtool.h>
39
40#include <asm/io.h>
41#include <asm/irq.h>
42#include <asm/uaccess.h>
43
44#include "ucc_geth.h"
45#include "ucc_geth_phy.h"
46#include <platforms/83xx/mpc8360e_pb.h>
47
48#define ugphy_printk(level, format, arg...) \
49 printk(level format "\n", ## arg)
50
51#define ugphy_dbg(format, arg...) \
52 ugphy_printk(KERN_DEBUG, format , ## arg)
53#define ugphy_err(format, arg...) \
54 ugphy_printk(KERN_ERR, format , ## arg)
55#define ugphy_info(format, arg...) \
56 ugphy_printk(KERN_INFO, format , ## arg)
57#define ugphy_warn(format, arg...) \
58 ugphy_printk(KERN_WARNING, format , ## arg)
59
60#ifdef UGETH_VERBOSE_DEBUG
61#define ugphy_vdbg ugphy_dbg
62#else
63#define ugphy_vdbg(fmt, args...) do { } while (0)
64#endif /* UGETH_VERBOSE_DEBUG */
65
66static void config_genmii_advert(struct ugeth_mii_info *mii_info);
67static void genmii_setup_forced(struct ugeth_mii_info *mii_info);
68static void genmii_restart_aneg(struct ugeth_mii_info *mii_info);
69static int gbit_config_aneg(struct ugeth_mii_info *mii_info);
70static int genmii_config_aneg(struct ugeth_mii_info *mii_info);
71static int genmii_update_link(struct ugeth_mii_info *mii_info);
72static int genmii_read_status(struct ugeth_mii_info *mii_info);
73u16 phy_read(struct ugeth_mii_info *mii_info, u16 regnum);
74void phy_write(struct ugeth_mii_info *mii_info, u16 regnum, u16 val);
75
76static u8 *bcsr_regs = NULL;
77
78/* Write value to the register at regnum in the PHY for this device, */
79/* waiting until the write is done before it returns. All PHY */
80/* configuration has to be done through the TSEC1 MIIM regs */
81void write_phy_reg(struct net_device *dev, int mii_id, int regnum, int value)
82{
83 ucc_geth_private_t *ugeth = netdev_priv(dev);
84 ucc_mii_mng_t *mii_regs;
85 enet_tbi_mii_reg_e mii_reg = (enet_tbi_mii_reg_e) regnum;
86 u32 tmp_reg;
87
88 ugphy_vdbg("%s: IN", __FUNCTION__);
89
90 spin_lock_irq(&ugeth->lock);
91
92 mii_regs = ugeth->mii_info->mii_regs;
93
94	/* Set this UCC to be the master of the MII management */
95 ucc_set_qe_mux_mii_mng(ugeth->ug_info->uf_info.ucc_num);
96
97 /* Stop the MII management read cycle */
98 out_be32(&mii_regs->miimcom, 0);
99	/* Setting up the MII Management Address Register */
100 tmp_reg = ((u32) mii_id << MIIMADD_PHY_ADDRESS_SHIFT) | mii_reg;
101 out_be32(&mii_regs->miimadd, tmp_reg);
102
103	/* Setting up the MII Management Control Register with the value */
104 out_be32(&mii_regs->miimcon, (u32) value);
105
106 /* Wait till MII management write is complete */
107 while ((in_be32(&mii_regs->miimind)) & MIIMIND_BUSY)
108 cpu_relax();
109
110 spin_unlock_irq(&ugeth->lock);
111
112 udelay(10000);
113}
114
115/* Reads from register regnum in the PHY for device dev, */
116/* returning the value. Clears miimcom first. All PHY */
117/* configuration has to be done through the TSEC1 MIIM regs */
118int read_phy_reg(struct net_device *dev, int mii_id, int regnum)
119{
120 ucc_geth_private_t *ugeth = netdev_priv(dev);
121 ucc_mii_mng_t *mii_regs;
122 enet_tbi_mii_reg_e mii_reg = (enet_tbi_mii_reg_e) regnum;
123 u32 tmp_reg;
124 u16 value;
125
126 ugphy_vdbg("%s: IN", __FUNCTION__);
127
128 spin_lock_irq(&ugeth->lock);
129
130 mii_regs = ugeth->mii_info->mii_regs;
131
132	/* Setting up the MII Management Address Register */
133 tmp_reg = ((u32) mii_id << MIIMADD_PHY_ADDRESS_SHIFT) | mii_reg;
134 out_be32(&mii_regs->miimadd, tmp_reg);
135
136 /* Perform an MII management read cycle */
137 out_be32(&mii_regs->miimcom, MIIMCOM_READ_CYCLE);
138
139	/* Wait till MII management read is complete */
140 while ((in_be32(&mii_regs->miimind)) & MIIMIND_BUSY)
141 cpu_relax();
142
143 udelay(10000);
144
145 /* Read MII management status */
146 value = (u16) in_be32(&mii_regs->miimstat);
147 out_be32(&mii_regs->miimcom, 0);
148 if (value == 0xffff)
149		ugphy_warn("read wrong value: mii_id %d, mii_reg %d, base %08x",
150 mii_id, mii_reg, (u32) & (mii_regs->miimcfg));
151
152 spin_unlock_irq(&ugeth->lock);
153
154 return (value);
155}
156
157void mii_clear_phy_interrupt(struct ugeth_mii_info *mii_info)
158{
159 ugphy_vdbg("%s: IN", __FUNCTION__);
160
161 if (mii_info->phyinfo->ack_interrupt)
162 mii_info->phyinfo->ack_interrupt(mii_info);
163}
164
165void mii_configure_phy_interrupt(struct ugeth_mii_info *mii_info,
166 u32 interrupts)
167{
168 ugphy_vdbg("%s: IN", __FUNCTION__);
169
170 mii_info->interrupts = interrupts;
171 if (mii_info->phyinfo->config_intr)
172 mii_info->phyinfo->config_intr(mii_info);
173}
174
175/* Writes MII_ADVERTISE with the appropriate values, after
176 * sanitizing advertise to make sure only supported features
177 * are advertised
178 */
179static void config_genmii_advert(struct ugeth_mii_info *mii_info)
180{
181 u32 advertise;
182 u16 adv;
183
184 ugphy_vdbg("%s: IN", __FUNCTION__);
185
186 /* Only allow advertising what this PHY supports */
187 mii_info->advertising &= mii_info->phyinfo->features;
188 advertise = mii_info->advertising;
189
190 /* Setup standard advertisement */
191 adv = phy_read(mii_info, MII_ADVERTISE);
192 adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
193 if (advertise & ADVERTISED_10baseT_Half)
194 adv |= ADVERTISE_10HALF;
195 if (advertise & ADVERTISED_10baseT_Full)
196 adv |= ADVERTISE_10FULL;
197 if (advertise & ADVERTISED_100baseT_Half)
198 adv |= ADVERTISE_100HALF;
199 if (advertise & ADVERTISED_100baseT_Full)
200 adv |= ADVERTISE_100FULL;
201 phy_write(mii_info, MII_ADVERTISE, adv);
202}
203
204static void genmii_setup_forced(struct ugeth_mii_info *mii_info)
205{
206 u16 ctrl;
207 u32 features = mii_info->phyinfo->features;
208
209 ugphy_vdbg("%s: IN", __FUNCTION__);
210
211 ctrl = phy_read(mii_info, MII_BMCR);
212
213 ctrl &=
214 ~(BMCR_FULLDPLX | BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_ANENABLE);
215 ctrl |= BMCR_RESET;
216
217 switch (mii_info->speed) {
218 case SPEED_1000:
219 if (features & (SUPPORTED_1000baseT_Half
220 | SUPPORTED_1000baseT_Full)) {
221 ctrl |= BMCR_SPEED1000;
222 break;
223 }
224 mii_info->speed = SPEED_100;
225 case SPEED_100:
226 if (features & (SUPPORTED_100baseT_Half
227 | SUPPORTED_100baseT_Full)) {
228 ctrl |= BMCR_SPEED100;
229 break;
230 }
231 mii_info->speed = SPEED_10;
232 case SPEED_10:
233 if (features & (SUPPORTED_10baseT_Half
234 | SUPPORTED_10baseT_Full))
235 break;
236 default: /* Unsupported speed! */
237 ugphy_err("%s: Bad speed!", mii_info->dev->name);
238 break;
239 }
240
241 phy_write(mii_info, MII_BMCR, ctrl);
242}
243
244/* Enable and Restart Autonegotiation */
245static void genmii_restart_aneg(struct ugeth_mii_info *mii_info)
246{
247 u16 ctl;
248
249 ugphy_vdbg("%s: IN", __FUNCTION__);
250
251 ctl = phy_read(mii_info, MII_BMCR);
252 ctl |= (BMCR_ANENABLE | BMCR_ANRESTART);
253 phy_write(mii_info, MII_BMCR, ctl);
254}
255
256static int gbit_config_aneg(struct ugeth_mii_info *mii_info)
257{
258 u16 adv;
259 u32 advertise;
260
261 ugphy_vdbg("%s: IN", __FUNCTION__);
262
263 if (mii_info->autoneg) {
264 /* Configure the ADVERTISE register */
265 config_genmii_advert(mii_info);
266 advertise = mii_info->advertising;
267
268 adv = phy_read(mii_info, MII_1000BASETCONTROL);
269 adv &= ~(MII_1000BASETCONTROL_FULLDUPLEXCAP |
270 MII_1000BASETCONTROL_HALFDUPLEXCAP);
271 if (advertise & SUPPORTED_1000baseT_Half)
272 adv |= MII_1000BASETCONTROL_HALFDUPLEXCAP;
273 if (advertise & SUPPORTED_1000baseT_Full)
274 adv |= MII_1000BASETCONTROL_FULLDUPLEXCAP;
275 phy_write(mii_info, MII_1000BASETCONTROL, adv);
276
277 /* Start/Restart aneg */
278 genmii_restart_aneg(mii_info);
279 } else
280 genmii_setup_forced(mii_info);
281
282 return 0;
283}
284
285static int genmii_config_aneg(struct ugeth_mii_info *mii_info)
286{
287 ugphy_vdbg("%s: IN", __FUNCTION__);
288
289 if (mii_info->autoneg) {
290 config_genmii_advert(mii_info);
291 genmii_restart_aneg(mii_info);
292 } else
293 genmii_setup_forced(mii_info);
294
295 return 0;
296}
297
298static int genmii_update_link(struct ugeth_mii_info *mii_info)
299{
300 u16 status;
301
302 ugphy_vdbg("%s: IN", __FUNCTION__);
303
304 /* Do a fake read */
305 phy_read(mii_info, MII_BMSR);
306
307 /* Read link and autonegotiation status */
308 status = phy_read(mii_info, MII_BMSR);
309 if ((status & BMSR_LSTATUS) == 0)
310 mii_info->link = 0;
311 else
312 mii_info->link = 1;
313
314 /* If we are autonegotiating, and not done,
315 * return an error */
316 if (mii_info->autoneg && !(status & BMSR_ANEGCOMPLETE))
317 return -EAGAIN;
318
319 return 0;
320}
321
322static int genmii_read_status(struct ugeth_mii_info *mii_info)
323{
324 u16 status;
325 int err;
326
327 ugphy_vdbg("%s: IN", __FUNCTION__);
328
329 /* Update the link, but return if there
330 * was an error */
331 err = genmii_update_link(mii_info);
332 if (err)
333 return err;
334
335 if (mii_info->autoneg) {
336 status = phy_read(mii_info, MII_LPA);
337
338 if (status & (LPA_10FULL | LPA_100FULL))
339 mii_info->duplex = DUPLEX_FULL;
340 else
341 mii_info->duplex = DUPLEX_HALF;
342 if (status & (LPA_100FULL | LPA_100HALF))
343 mii_info->speed = SPEED_100;
344 else
345 mii_info->speed = SPEED_10;
346 mii_info->pause = 0;
347 }
348 /* On non-aneg, we assume what we put in BMCR is the speed,
349 * though magic-aneg shouldn't prevent this case from occurring
350 */
351
352 return 0;
353}
354
355static int marvell_init(struct ugeth_mii_info *mii_info)
356{
357 ugphy_vdbg("%s: IN", __FUNCTION__);
358
359 phy_write(mii_info, 0x14, 0x0cd2);
360 phy_write(mii_info, MII_BMCR,
361 phy_read(mii_info, MII_BMCR) | BMCR_RESET);
362 msleep(4000);
363
364 return 0;
365}
366
367static int marvell_config_aneg(struct ugeth_mii_info *mii_info)
368{
369 ugphy_vdbg("%s: IN", __FUNCTION__);
370
371 /* The Marvell PHY has an errata which requires
372 * that certain registers get written in order
373 * to restart autonegotiation */
374 phy_write(mii_info, MII_BMCR, BMCR_RESET);
375
376 phy_write(mii_info, 0x1d, 0x1f);
377 phy_write(mii_info, 0x1e, 0x200c);
378 phy_write(mii_info, 0x1d, 0x5);
379 phy_write(mii_info, 0x1e, 0);
380 phy_write(mii_info, 0x1e, 0x100);
381
382 gbit_config_aneg(mii_info);
383
384 return 0;
385}
386
387static int marvell_read_status(struct ugeth_mii_info *mii_info)
388{
389 u16 status;
390 int err;
391
392 ugphy_vdbg("%s: IN", __FUNCTION__);
393
394 /* Update the link, but return if there
395 * was an error */
396 err = genmii_update_link(mii_info);
397 if (err)
398 return err;
399
400 /* If the link is up, read the speed and duplex */
401 /* If we aren't autonegotiating, assume speeds
402 * are as set */
403 if (mii_info->autoneg && mii_info->link) {
404 int speed;
405 status = phy_read(mii_info, MII_M1011_PHY_SPEC_STATUS);
406
407		/* Get the duplex */
408 if (status & MII_M1011_PHY_SPEC_STATUS_FULLDUPLEX)
409 mii_info->duplex = DUPLEX_FULL;
410 else
411 mii_info->duplex = DUPLEX_HALF;
412
413 /* Get the speed */
414 speed = status & MII_M1011_PHY_SPEC_STATUS_SPD_MASK;
415 switch (speed) {
416 case MII_M1011_PHY_SPEC_STATUS_1000:
417 mii_info->speed = SPEED_1000;
418 break;
419 case MII_M1011_PHY_SPEC_STATUS_100:
420 mii_info->speed = SPEED_100;
421 break;
422 default:
423 mii_info->speed = SPEED_10;
424 break;
425 }
426 mii_info->pause = 0;
427 }
428
429 return 0;
430}
431
432static int marvell_ack_interrupt(struct ugeth_mii_info *mii_info)
433{
434 ugphy_vdbg("%s: IN", __FUNCTION__);
435
436 /* Clear the interrupts by reading the reg */
437 phy_read(mii_info, MII_M1011_IEVENT);
438
439 return 0;
440}
441
442static int marvell_config_intr(struct ugeth_mii_info *mii_info)
443{
444 ugphy_vdbg("%s: IN", __FUNCTION__);
445
446 if (mii_info->interrupts == MII_INTERRUPT_ENABLED)
447 phy_write(mii_info, MII_M1011_IMASK, MII_M1011_IMASK_INIT);
448 else
449 phy_write(mii_info, MII_M1011_IMASK, MII_M1011_IMASK_CLEAR);
450
451 return 0;
452}
453
454static int cis820x_init(struct ugeth_mii_info *mii_info)
455{
456 ugphy_vdbg("%s: IN", __FUNCTION__);
457
458 phy_write(mii_info, MII_CIS8201_AUX_CONSTAT,
459 MII_CIS8201_AUXCONSTAT_INIT);
460 phy_write(mii_info, MII_CIS8201_EXT_CON1, MII_CIS8201_EXTCON1_INIT);
461
462 return 0;
463}
464
465static int cis820x_read_status(struct ugeth_mii_info *mii_info)
466{
467 u16 status;
468 int err;
469
470 ugphy_vdbg("%s: IN", __FUNCTION__);
471
472 /* Update the link, but return if there
473 * was an error */
474 err = genmii_update_link(mii_info);
475 if (err)
476 return err;
477
478 /* If the link is up, read the speed and duplex */
479 /* If we aren't autonegotiating, assume speeds
480 * are as set */
481 if (mii_info->autoneg && mii_info->link) {
482 int speed;
483
484 status = phy_read(mii_info, MII_CIS8201_AUX_CONSTAT);
485 if (status & MII_CIS8201_AUXCONSTAT_DUPLEX)
486 mii_info->duplex = DUPLEX_FULL;
487 else
488 mii_info->duplex = DUPLEX_HALF;
489
490 speed = status & MII_CIS8201_AUXCONSTAT_SPEED;
491
492 switch (speed) {
493 case MII_CIS8201_AUXCONSTAT_GBIT:
494 mii_info->speed = SPEED_1000;
495 break;
496 case MII_CIS8201_AUXCONSTAT_100:
497 mii_info->speed = SPEED_100;
498 break;
499 default:
500 mii_info->speed = SPEED_10;
501 break;
502 }
503 }
504
505 return 0;
506}
507
508static int cis820x_ack_interrupt(struct ugeth_mii_info *mii_info)
509{
510 ugphy_vdbg("%s: IN", __FUNCTION__);
511
512 phy_read(mii_info, MII_CIS8201_ISTAT);
513
514 return 0;
515}
516
517static int cis820x_config_intr(struct ugeth_mii_info *mii_info)
518{
519 ugphy_vdbg("%s: IN", __FUNCTION__);
520
521 if (mii_info->interrupts == MII_INTERRUPT_ENABLED)
522 phy_write(mii_info, MII_CIS8201_IMASK, MII_CIS8201_IMASK_MASK);
523 else
524 phy_write(mii_info, MII_CIS8201_IMASK, 0);
525
526 return 0;
527}
528
529#define DM9161_DELAY 10
530
531static int dm9161_read_status(struct ugeth_mii_info *mii_info)
532{
533 u16 status;
534 int err;
535
536 ugphy_vdbg("%s: IN", __FUNCTION__);
537
538 /* Update the link, but return if there
539 * was an error */
540 err = genmii_update_link(mii_info);
541 if (err)
542 return err;
543
544 /* If the link is up, read the speed and duplex */
545 /* If we aren't autonegotiating, assume speeds
546 * are as set */
547 if (mii_info->autoneg && mii_info->link) {
548 status = phy_read(mii_info, MII_DM9161_SCSR);
549 if (status & (MII_DM9161_SCSR_100F | MII_DM9161_SCSR_100H))
550 mii_info->speed = SPEED_100;
551 else
552 mii_info->speed = SPEED_10;
553
554 if (status & (MII_DM9161_SCSR_100F | MII_DM9161_SCSR_10F))
555 mii_info->duplex = DUPLEX_FULL;
556 else
557 mii_info->duplex = DUPLEX_HALF;
558 }
559
560 return 0;
561}
562
563static int dm9161_config_aneg(struct ugeth_mii_info *mii_info)
564{
565 struct dm9161_private *priv = mii_info->priv;
566
567 ugphy_vdbg("%s: IN", __FUNCTION__);
568
569 if (0 == priv->resetdone)
570 return -EAGAIN;
571
572 return 0;
573}
574
575static void dm9161_timer(unsigned long data)
576{
577 struct ugeth_mii_info *mii_info = (struct ugeth_mii_info *)data;
578 struct dm9161_private *priv = mii_info->priv;
579 u16 status = phy_read(mii_info, MII_BMSR);
580
581 ugphy_vdbg("%s: IN", __FUNCTION__);
582
583 if (status & BMSR_ANEGCOMPLETE) {
584 priv->resetdone = 1;
585 } else
586 mod_timer(&priv->timer, jiffies + DM9161_DELAY * HZ);
587}
588
589static int dm9161_init(struct ugeth_mii_info *mii_info)
590{
591 struct dm9161_private *priv;
592
593 ugphy_vdbg("%s: IN", __FUNCTION__);
594
595 /* Allocate the private data structure */
596 priv = kmalloc(sizeof(struct dm9161_private), GFP_KERNEL);
597
598 if (NULL == priv)
599 return -ENOMEM;
600
601 mii_info->priv = priv;
602
603 /* Reset is not done yet */
604 priv->resetdone = 0;
605
606 phy_write(mii_info, MII_BMCR,
607 phy_read(mii_info, MII_BMCR) | BMCR_RESET);
608
609 phy_write(mii_info, MII_BMCR,
610 phy_read(mii_info, MII_BMCR) & ~BMCR_ISOLATE);
611
612 config_genmii_advert(mii_info);
613 /* Start/Restart aneg */
614 genmii_config_aneg(mii_info);
615
616 /* Start a timer for DM9161_DELAY seconds to wait
617 * for the PHY to be ready */
618 init_timer(&priv->timer);
619 priv->timer.function = &dm9161_timer;
620 priv->timer.data = (unsigned long)mii_info;
621 mod_timer(&priv->timer, jiffies + DM9161_DELAY * HZ);
622
623 return 0;
624}
625
626static void dm9161_close(struct ugeth_mii_info *mii_info)
627{
628 struct dm9161_private *priv = mii_info->priv;
629
630 ugphy_vdbg("%s: IN", __FUNCTION__);
631
632 del_timer_sync(&priv->timer);
633 kfree(priv);
634}
635
636static int dm9161_ack_interrupt(struct ugeth_mii_info *mii_info)
637{
638/* FIXME: These lines are a workaround for a bug in the MPC8325.
639   Remove them from here when it is fixed */
640 if (bcsr_regs == NULL)
641 bcsr_regs = (u8 *) ioremap(BCSR_PHYS_ADDR, BCSR_SIZE);
642 bcsr_regs[14] |= 0x40;
643 ugphy_vdbg("%s: IN", __FUNCTION__);
644
645 /* Clear the interrupts by reading the reg */
646 phy_read(mii_info, MII_DM9161_INTR);
647
648
649 return 0;
650}
651
652static int dm9161_config_intr(struct ugeth_mii_info *mii_info)
653{
654/* FIXME: These lines are a workaround for a bug in the MPC8325.
655   Remove them from here when it is fixed */
656 if (bcsr_regs == NULL) {
657 bcsr_regs = (u8 *) ioremap(BCSR_PHYS_ADDR, BCSR_SIZE);
658 bcsr_regs[14] &= ~0x40;
659 }
660 ugphy_vdbg("%s: IN", __FUNCTION__);
661
662 if (mii_info->interrupts == MII_INTERRUPT_ENABLED)
663 phy_write(mii_info, MII_DM9161_INTR, MII_DM9161_INTR_INIT);
664 else
665 phy_write(mii_info, MII_DM9161_INTR, MII_DM9161_INTR_STOP);
666
667 return 0;
668}
669
670/* Cicada 820x */
671static struct phy_info phy_info_cis820x = {
672 .phy_id = 0x000fc440,
673 .name = "Cicada Cis8204",
674 .phy_id_mask = 0x000fffc0,
675 .features = MII_GBIT_FEATURES,
676 .init = &cis820x_init,
677 .config_aneg = &gbit_config_aneg,
678 .read_status = &cis820x_read_status,
679 .ack_interrupt = &cis820x_ack_interrupt,
680 .config_intr = &cis820x_config_intr,
681};
682
683static struct phy_info phy_info_dm9161 = {
684 .phy_id = 0x0181b880,
685 .phy_id_mask = 0x0ffffff0,
686 .name = "Davicom DM9161E",
687 .init = dm9161_init,
688 .config_aneg = dm9161_config_aneg,
689 .read_status = dm9161_read_status,
690 .close = dm9161_close,
691};
692
693static struct phy_info phy_info_dm9161a = {
694 .phy_id = 0x0181b8a0,
695 .phy_id_mask = 0x0ffffff0,
696 .name = "Davicom DM9161A",
697 .features = MII_BASIC_FEATURES,
698 .init = dm9161_init,
699 .config_aneg = dm9161_config_aneg,
700 .read_status = dm9161_read_status,
701 .ack_interrupt = dm9161_ack_interrupt,
702 .config_intr = dm9161_config_intr,
703 .close = dm9161_close,
704};
705
706static struct phy_info phy_info_marvell = {
707 .phy_id = 0x01410c00,
708 .phy_id_mask = 0xffffff00,
709 .name = "Marvell 88E11x1",
710 .features = MII_GBIT_FEATURES,
711 .init = &marvell_init,
712 .config_aneg = &marvell_config_aneg,
713 .read_status = &marvell_read_status,
714 .ack_interrupt = &marvell_ack_interrupt,
715 .config_intr = &marvell_config_intr,
716};
717
718static struct phy_info phy_info_genmii = {
719 .phy_id = 0x00000000,
720 .phy_id_mask = 0x00000000,
721 .name = "Generic MII",
722 .features = MII_BASIC_FEATURES,
723 .config_aneg = genmii_config_aneg,
724 .read_status = genmii_read_status,
725};
726
727static struct phy_info *phy_info[] = {
728 &phy_info_cis820x,
729 &phy_info_marvell,
730 &phy_info_dm9161,
731 &phy_info_dm9161a,
732 &phy_info_genmii,
733 NULL
734};
735
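Supporting another PHY type amounts to adding a struct phy_info like those above and listing it in phy_info[] ahead of the generic fallback; a sketch with placeholder values, not part of this driver:

static struct phy_info phy_info_hypothetical = {
	.phy_id = 0x00000000,		/* placeholder ID */
	.phy_id_mask = 0x0ffffff0,	/* placeholder mask */
	.name = "Hypothetical PHY",
	.features = MII_BASIC_FEATURES,
	.config_aneg = genmii_config_aneg,
	.read_status = genmii_read_status,
};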
736u16 phy_read(struct ugeth_mii_info *mii_info, u16 regnum)
737{
738 u16 retval;
739 unsigned long flags;
740
741 ugphy_vdbg("%s: IN", __FUNCTION__);
742
743 spin_lock_irqsave(&mii_info->mdio_lock, flags);
744 retval = mii_info->mdio_read(mii_info->dev, mii_info->mii_id, regnum);
745 spin_unlock_irqrestore(&mii_info->mdio_lock, flags);
746
747 return retval;
748}
749
750void phy_write(struct ugeth_mii_info *mii_info, u16 regnum, u16 val)
751{
752 unsigned long flags;
753
754 ugphy_vdbg("%s: IN", __FUNCTION__);
755
756 spin_lock_irqsave(&mii_info->mdio_lock, flags);
757 mii_info->mdio_write(mii_info->dev, mii_info->mii_id, regnum, val);
758 spin_unlock_irqrestore(&mii_info->mdio_lock, flags);
759}
760
761/* Use the PHY ID registers to determine what type of PHY is attached
762 * to device dev. Returns a struct phy_info structure describing that PHY
763 */
764struct phy_info *get_phy_info(struct ugeth_mii_info *mii_info)
765{
766 u16 phy_reg;
767 u32 phy_ID;
768 int i;
769 struct phy_info *theInfo = NULL;
770 struct net_device *dev = mii_info->dev;
771
772 ugphy_vdbg("%s: IN", __FUNCTION__);
773
774 /* Grab the bits from PHYIR1, and put them in the upper half */
775 phy_reg = phy_read(mii_info, MII_PHYSID1);
776 phy_ID = (phy_reg & 0xffff) << 16;
777
778 /* Grab the bits from PHYIR2, and put them in the lower half */
779 phy_reg = phy_read(mii_info, MII_PHYSID2);
780 phy_ID |= (phy_reg & 0xffff);
781
782 /* loop through all the known PHY types, and find one that */
783 /* matches the ID we read from the PHY. */
784 for (i = 0; phy_info[i]; i++)
785 if (phy_info[i]->phy_id == (phy_ID & phy_info[i]->phy_id_mask)){
786 theInfo = phy_info[i];
787 break;
788 }
789
790 /* This shouldn't happen, as we have generic PHY support */
791 if (theInfo == NULL) {
792 ugphy_info("%s: PHY id %x is not supported!", dev->name,
793 phy_ID);
794 return NULL;
795 } else {
796 ugphy_info("%s: PHY is %s (%x)", dev->name, theInfo->name,
797 phy_ID);
798 }
799
800 return theInfo;
801}
diff --git a/drivers/net/ucc_geth_phy.h b/drivers/net/ucc_geth_phy.h
new file mode 100644
index 00000000000..2f98b8f1bb0
--- /dev/null
+++ b/drivers/net/ucc_geth_phy.h
@@ -0,0 +1,217 @@
1/*
2 * Copyright (C) Freescale Semiconductor, Inc. 2006. All rights reserved.
3 *
4 * Author: Shlomi Gridish <gridish@freescale.com>
5 *
6 * Description:
7 * UCC GETH Driver -- PHY handling
8 *
9 * Changelog:
10 * Jun 28, 2006 Li Yang <LeoLi@freescale.com>
11 * - Rearrange code and style fixes
12 *
13 * This program is free software; you can redistribute it and/or modify it
14 * under the terms of the GNU General Public License as published by the
15 * Free Software Foundation; either version 2 of the License, or (at your
16 * option) any later version.
17 *
18 */
19#ifndef __UCC_GETH_PHY_H__
20#define __UCC_GETH_PHY_H__
21
22#define MII_end ((u32)-2)
23#define MII_read ((u32)-1)
24
25#define MIIMIND_BUSY 0x00000001
26#define MIIMIND_NOTVALID 0x00000004
27
28#define UGETH_AN_TIMEOUT 2000
29
30/* 1000BT control (Marvell & BCM54xx at least) */
31#define MII_1000BASETCONTROL 0x09
32#define MII_1000BASETCONTROL_FULLDUPLEXCAP 0x0200
33#define MII_1000BASETCONTROL_HALFDUPLEXCAP 0x0100
34
35/* Cicada Extended Control Register 1 */
36#define MII_CIS8201_EXT_CON1 0x17
37#define MII_CIS8201_EXTCON1_INIT 0x0000
38
39/* Cicada Interrupt Mask Register */
40#define MII_CIS8201_IMASK 0x19
41#define MII_CIS8201_IMASK_IEN 0x8000
42#define MII_CIS8201_IMASK_SPEED 0x4000
43#define MII_CIS8201_IMASK_LINK 0x2000
44#define MII_CIS8201_IMASK_DUPLEX 0x1000
45#define MII_CIS8201_IMASK_MASK 0xf000
46
47/* Cicada Interrupt Status Register */
48#define MII_CIS8201_ISTAT 0x1a
49#define MII_CIS8201_ISTAT_STATUS 0x8000
50#define MII_CIS8201_ISTAT_SPEED 0x4000
51#define MII_CIS8201_ISTAT_LINK 0x2000
52#define MII_CIS8201_ISTAT_DUPLEX 0x1000
53
54/* Cicada Auxiliary Control/Status Register */
55#define MII_CIS8201_AUX_CONSTAT 0x1c
56#define MII_CIS8201_AUXCONSTAT_INIT 0x0004
57#define MII_CIS8201_AUXCONSTAT_DUPLEX 0x0020
58#define MII_CIS8201_AUXCONSTAT_SPEED 0x0018
59#define MII_CIS8201_AUXCONSTAT_GBIT 0x0010
60#define MII_CIS8201_AUXCONSTAT_100 0x0008
61
62/* 88E1011 PHY Status Register */
63#define MII_M1011_PHY_SPEC_STATUS 0x11
64#define MII_M1011_PHY_SPEC_STATUS_1000 0x8000
65#define MII_M1011_PHY_SPEC_STATUS_100 0x4000
66#define MII_M1011_PHY_SPEC_STATUS_SPD_MASK 0xc000
67#define MII_M1011_PHY_SPEC_STATUS_FULLDUPLEX 0x2000
68#define MII_M1011_PHY_SPEC_STATUS_RESOLVED 0x0800
69#define MII_M1011_PHY_SPEC_STATUS_LINK 0x0400
70
71#define MII_M1011_IEVENT 0x13
72#define MII_M1011_IEVENT_CLEAR 0x0000
73
74#define MII_M1011_IMASK 0x12
75#define MII_M1011_IMASK_INIT 0x6400
76#define MII_M1011_IMASK_CLEAR 0x0000
77
78#define MII_DM9161_SCR 0x10
79#define MII_DM9161_SCR_INIT 0x0610
80
81/* DM9161 Specified Configuration and Status Register */
82#define MII_DM9161_SCSR 0x11
83#define MII_DM9161_SCSR_100F 0x8000
84#define MII_DM9161_SCSR_100H 0x4000
85#define MII_DM9161_SCSR_10F 0x2000
86#define MII_DM9161_SCSR_10H 0x1000
87
88/* DM9161 Interrupt Register */
89#define MII_DM9161_INTR 0x15
90#define MII_DM9161_INTR_PEND 0x8000
91#define MII_DM9161_INTR_DPLX_MASK 0x0800
92#define MII_DM9161_INTR_SPD_MASK 0x0400
93#define MII_DM9161_INTR_LINK_MASK 0x0200
94#define MII_DM9161_INTR_MASK 0x0100
95#define MII_DM9161_INTR_DPLX_CHANGE 0x0010
96#define MII_DM9161_INTR_SPD_CHANGE 0x0008
97#define MII_DM9161_INTR_LINK_CHANGE 0x0004
98#define MII_DM9161_INTR_INIT 0x0000
99#define MII_DM9161_INTR_STOP \
100(MII_DM9161_INTR_DPLX_MASK | MII_DM9161_INTR_SPD_MASK \
101 | MII_DM9161_INTR_LINK_MASK | MII_DM9161_INTR_MASK)
102
103/* DM9161 10BT Configuration/Status */
104#define MII_DM9161_10BTCSR 0x12
105#define MII_DM9161_10BTCSR_INIT 0x7800
106
107#define MII_BASIC_FEATURES (SUPPORTED_10baseT_Half | \
108 SUPPORTED_10baseT_Full | \
109 SUPPORTED_100baseT_Half | \
110 SUPPORTED_100baseT_Full | \
111 SUPPORTED_Autoneg | \
112 SUPPORTED_TP | \
113 SUPPORTED_MII)
114
115#define MII_GBIT_FEATURES (MII_BASIC_FEATURES | \
116 SUPPORTED_1000baseT_Half | \
117 SUPPORTED_1000baseT_Full)
118
119#define MII_READ_COMMAND 0x00000001
120
121#define MII_INTERRUPT_DISABLED 0x0
122#define MII_INTERRUPT_ENABLED 0x1
123/* Taken from mii_if_info and sungem_phy.h */
124struct ugeth_mii_info {
125 /* Information about the PHY type */
126 /* And management functions */
127 struct phy_info *phyinfo;
128
129 ucc_mii_mng_t *mii_regs;
130
131 /* forced speed & duplex (no autoneg)
132 * partner speed & duplex & pause (autoneg)
133 */
134 int speed;
135 int duplex;
136 int pause;
137
138 /* The most recently read link state */
139 int link;
140
141 /* Enabled Interrupts */
142 u32 interrupts;
143
144 u32 advertising;
145 int autoneg;
146 int mii_id;
147
148 /* private data pointer */
149 /* For use by PHYs to maintain extra state */
150 void *priv;
151
152 /* Provided by host chip */
153 struct net_device *dev;
154
155 /* A lock to ensure that only one thing can read/write
156 * the MDIO bus at a time */
157 spinlock_t mdio_lock;
158
159 /* Provided by ethernet driver */
160 int (*mdio_read) (struct net_device * dev, int mii_id, int reg);
161 void (*mdio_write) (struct net_device * dev, int mii_id, int reg,
162 int val);
163};
164
165/* struct phy_info: a structure which defines attributes for a PHY
166 *
167 * id will contain a number which represents the PHY. During
168 * startup, the driver will poll the PHY to find out what its
169 * UID--as defined by registers 2 and 3--is. The 32-bit result
170 * gotten from the PHY will be ANDed with phy_id_mask to
171 * discard any bits which may change based on revision numbers
172 * unimportant to functionality
173 *
174 * There are 6 commands which take a ugeth_mii_info structure.
175 * Each PHY must declare config_aneg, and read_status.
176 */
177struct phy_info {
178 u32 phy_id;
179 char *name;
180 unsigned int phy_id_mask;
181 u32 features;
182
183 /* Called to initialize the PHY */
184 int (*init) (struct ugeth_mii_info * mii_info);
185
186 /* Called to suspend the PHY for power */
187 int (*suspend) (struct ugeth_mii_info * mii_info);
188
189 /* Reconfigures autonegotiation (or disables it) */
190 int (*config_aneg) (struct ugeth_mii_info * mii_info);
191
192 /* Determines the negotiated speed and duplex */
193 int (*read_status) (struct ugeth_mii_info * mii_info);
194
195 /* Clears any pending interrupts */
196 int (*ack_interrupt) (struct ugeth_mii_info * mii_info);
197
198 /* Enables or disables interrupts */
199 int (*config_intr) (struct ugeth_mii_info * mii_info);
200
201 /* Clears up any memory if needed */
202 void (*close) (struct ugeth_mii_info * mii_info);
203};
204
205struct phy_info *get_phy_info(struct ugeth_mii_info *mii_info);
206void write_phy_reg(struct net_device *dev, int mii_id, int regnum, int value);
207int read_phy_reg(struct net_device *dev, int mii_id, int regnum);
208void mii_clear_phy_interrupt(struct ugeth_mii_info *mii_info);
209void mii_configure_phy_interrupt(struct ugeth_mii_info *mii_info,
210 u32 interrupts);
211
212struct dm9161_private {
213 struct timer_list timer;
214 int resetdone;
215};
216
217#endif /* __UCC_GETH_PHY_H__ */
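
MII_DM9161_INTR_STOP above is just the OR of the four *_MASK bits, so a single register write masks the duplex, speed, link and global interrupt sources at once. A quick standalone check of the composed value, with the constants copied from the header:

#include <stdio.h>

#define MII_DM9161_INTR_DPLX_MASK   0x0800
#define MII_DM9161_INTR_SPD_MASK    0x0400
#define MII_DM9161_INTR_LINK_MASK   0x0200
#define MII_DM9161_INTR_MASK        0x0100

#define MII_DM9161_INTR_STOP \
	(MII_DM9161_INTR_DPLX_MASK | MII_DM9161_INTR_SPD_MASK \
	 | MII_DM9161_INTR_LINK_MASK | MII_DM9161_INTR_MASK)

int main(void)
{
	/* Expect 0x0f00: all four interrupt-mask bits set. */
	printf("MII_DM9161_INTR_STOP = 0x%04x\n", MII_DM9161_INTR_STOP);
	return 0;
}
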
diff --git a/drivers/net/via-rhine.c b/drivers/net/via-rhine.c
index d3d0ec97031..ae971080e2e 100644
--- a/drivers/net/via-rhine.c
+++ b/drivers/net/via-rhine.c
@@ -30,8 +30,8 @@
30*/ 30*/
31 31
32#define DRV_NAME "via-rhine" 32#define DRV_NAME "via-rhine"
33#define DRV_VERSION "1.4.0" 33#define DRV_VERSION "1.4.1"
34#define DRV_RELDATE "June-27-2006" 34#define DRV_RELDATE "July-24-2006"
35 35
36 36
37/* A few user-configurable values. 37/* A few user-configurable values.
@@ -44,6 +44,10 @@ static int max_interrupt_work = 20;
44 Setting to > 1518 effectively disables this feature. */ 44 Setting to > 1518 effectively disables this feature. */
45static int rx_copybreak; 45static int rx_copybreak;
46 46
47/* Work-around for broken BIOSes: they are unable to get the chip back out of
48 power state D3 so PXE booting fails. bootparam(7): via-rhine.avoid_D3=1 */
49static int avoid_D3;
50
47/* 51/*
48 * In case you are looking for 'options[]' or 'full_duplex[]', they 52 * In case you are looking for 'options[]' or 'full_duplex[]', they
49 * are gone. Use ethtool(8) instead. 53 * are gone. Use ethtool(8) instead.
@@ -63,7 +67,11 @@ static const int multicast_filter_limit = 32;
63 There are no ill effects from too-large receive rings. */ 67 There are no ill effects from too-large receive rings. */
64#define TX_RING_SIZE 16 68#define TX_RING_SIZE 16
65#define TX_QUEUE_LEN 10 /* Limit ring entries actually used. */ 69#define TX_QUEUE_LEN 10 /* Limit ring entries actually used. */
70#ifdef CONFIG_VIA_RHINE_NAPI
71#define RX_RING_SIZE 64
72#else
66#define RX_RING_SIZE 16 73#define RX_RING_SIZE 16
74#endif
67 75
68 76
69/* Operational parameters that usually are not changed. */ 77/* Operational parameters that usually are not changed. */
@@ -116,9 +124,11 @@ MODULE_LICENSE("GPL");
116module_param(max_interrupt_work, int, 0); 124module_param(max_interrupt_work, int, 0);
117module_param(debug, int, 0); 125module_param(debug, int, 0);
118module_param(rx_copybreak, int, 0); 126module_param(rx_copybreak, int, 0);
127module_param(avoid_D3, bool, 0);
119MODULE_PARM_DESC(max_interrupt_work, "VIA Rhine maximum events handled per interrupt"); 128MODULE_PARM_DESC(max_interrupt_work, "VIA Rhine maximum events handled per interrupt");
120MODULE_PARM_DESC(debug, "VIA Rhine debug level (0-7)"); 129MODULE_PARM_DESC(debug, "VIA Rhine debug level (0-7)");
121MODULE_PARM_DESC(rx_copybreak, "VIA Rhine copy breakpoint for copy-only-tiny-frames"); 130MODULE_PARM_DESC(rx_copybreak, "VIA Rhine copy breakpoint for copy-only-tiny-frames");
131MODULE_PARM_DESC(avoid_D3, "Avoid power state D3 (work-around for broken BIOSes)");
122 132
123/* 133/*
124 Theory of Operation 134 Theory of Operation
@@ -396,7 +406,7 @@ static void rhine_tx_timeout(struct net_device *dev);
396static int rhine_start_tx(struct sk_buff *skb, struct net_device *dev); 406static int rhine_start_tx(struct sk_buff *skb, struct net_device *dev);
397static irqreturn_t rhine_interrupt(int irq, void *dev_instance, struct pt_regs *regs); 407static irqreturn_t rhine_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
398static void rhine_tx(struct net_device *dev); 408static void rhine_tx(struct net_device *dev);
399static void rhine_rx(struct net_device *dev); 409static int rhine_rx(struct net_device *dev, int limit);
400static void rhine_error(struct net_device *dev, int intr_status); 410static void rhine_error(struct net_device *dev, int intr_status);
401static void rhine_set_rx_mode(struct net_device *dev); 411static void rhine_set_rx_mode(struct net_device *dev);
402static struct net_device_stats *rhine_get_stats(struct net_device *dev); 412static struct net_device_stats *rhine_get_stats(struct net_device *dev);
@@ -564,6 +574,32 @@ static void rhine_poll(struct net_device *dev)
564} 574}
565#endif 575#endif
566 576
577#ifdef CONFIG_VIA_RHINE_NAPI
578static int rhine_napipoll(struct net_device *dev, int *budget)
579{
580 struct rhine_private *rp = netdev_priv(dev);
581 void __iomem *ioaddr = rp->base;
582 int done, limit = min(dev->quota, *budget);
583
584 done = rhine_rx(dev, limit);
585 *budget -= done;
586 dev->quota -= done;
587
588 if (done < limit) {
589 netif_rx_complete(dev);
590
591 iowrite16(IntrRxDone | IntrRxErr | IntrRxEmpty| IntrRxOverflow |
592 IntrRxDropped | IntrRxNoBuf | IntrTxAborted |
593 IntrTxDone | IntrTxError | IntrTxUnderrun |
594 IntrPCIErr | IntrStatsMax | IntrLinkChange,
595 ioaddr + IntrEnable);
596 return 0;
597 }
598 else
599 return 1;
600}
601#endif
602
567static void rhine_hw_init(struct net_device *dev, long pioaddr) 603static void rhine_hw_init(struct net_device *dev, long pioaddr)
568{ 604{
569 struct rhine_private *rp = netdev_priv(dev); 605 struct rhine_private *rp = netdev_priv(dev);
@@ -744,6 +780,10 @@ static int __devinit rhine_init_one(struct pci_dev *pdev,
744#ifdef CONFIG_NET_POLL_CONTROLLER 780#ifdef CONFIG_NET_POLL_CONTROLLER
745 dev->poll_controller = rhine_poll; 781 dev->poll_controller = rhine_poll;
746#endif 782#endif
783#ifdef CONFIG_VIA_RHINE_NAPI
784 dev->poll = rhine_napipoll;
785 dev->weight = 64;
786#endif
747 if (rp->quirks & rqRhineI) 787 if (rp->quirks & rqRhineI)
748 dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM; 788 dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM;
749 789
@@ -789,6 +829,9 @@ static int __devinit rhine_init_one(struct pci_dev *pdev,
789 } 829 }
790 } 830 }
791 rp->mii_if.phy_id = phy_id; 831 rp->mii_if.phy_id = phy_id;
832 if (debug > 1 && avoid_D3)
833 printk(KERN_INFO "%s: No D3 power state at shutdown.\n",
834 dev->name);
792 835
793 return 0; 836 return 0;
794 837
@@ -1014,6 +1057,8 @@ static void init_registers(struct net_device *dev)
1014 1057
1015 rhine_set_rx_mode(dev); 1058 rhine_set_rx_mode(dev);
1016 1059
1060 netif_poll_enable(dev);
1061
1017 /* Enable interrupts by setting the interrupt mask. */ 1062 /* Enable interrupts by setting the interrupt mask. */
1018 iowrite16(IntrRxDone | IntrRxErr | IntrRxEmpty| IntrRxOverflow | 1063 iowrite16(IntrRxDone | IntrRxErr | IntrRxEmpty| IntrRxOverflow |
1019 IntrRxDropped | IntrRxNoBuf | IntrTxAborted | 1064 IntrRxDropped | IntrRxNoBuf | IntrTxAborted |
@@ -1268,8 +1313,18 @@ static irqreturn_t rhine_interrupt(int irq, void *dev_instance, struct pt_regs *
1268 dev->name, intr_status); 1313 dev->name, intr_status);
1269 1314
1270 if (intr_status & (IntrRxDone | IntrRxErr | IntrRxDropped | 1315 if (intr_status & (IntrRxDone | IntrRxErr | IntrRxDropped |
1271 IntrRxWakeUp | IntrRxEmpty | IntrRxNoBuf)) 1316 IntrRxWakeUp | IntrRxEmpty | IntrRxNoBuf)) {
1272 rhine_rx(dev); 1317#ifdef CONFIG_VIA_RHINE_NAPI
1318 iowrite16(IntrTxAborted |
1319 IntrTxDone | IntrTxError | IntrTxUnderrun |
1320 IntrPCIErr | IntrStatsMax | IntrLinkChange,
1321 ioaddr + IntrEnable);
1322
1323 netif_rx_schedule(dev);
1324#else
1325 rhine_rx(dev, RX_RING_SIZE);
1326#endif
1327 }
1273 1328
1274 if (intr_status & (IntrTxErrSummary | IntrTxDone)) { 1329 if (intr_status & (IntrTxErrSummary | IntrTxDone)) {
1275 if (intr_status & IntrTxErrSummary) { 1330 if (intr_status & IntrTxErrSummary) {
@@ -1367,13 +1422,12 @@ static void rhine_tx(struct net_device *dev)
1367 spin_unlock(&rp->lock); 1422 spin_unlock(&rp->lock);
1368} 1423}
1369 1424
1370/* This routine is logically part of the interrupt handler, but isolated 1425/* Process up to limit frames from receive ring */
1371 for clarity and better register allocation. */ 1426static int rhine_rx(struct net_device *dev, int limit)
1372static void rhine_rx(struct net_device *dev)
1373{ 1427{
1374 struct rhine_private *rp = netdev_priv(dev); 1428 struct rhine_private *rp = netdev_priv(dev);
1429 int count;
1375 int entry = rp->cur_rx % RX_RING_SIZE; 1430 int entry = rp->cur_rx % RX_RING_SIZE;
1376 int boguscnt = rp->dirty_rx + RX_RING_SIZE - rp->cur_rx;
1377 1431
1378 if (debug > 4) { 1432 if (debug > 4) {
1379 printk(KERN_DEBUG "%s: rhine_rx(), entry %d status %8.8x.\n", 1433 printk(KERN_DEBUG "%s: rhine_rx(), entry %d status %8.8x.\n",
@@ -1382,16 +1436,18 @@ static void rhine_rx(struct net_device *dev)
1382 } 1436 }
1383 1437
1384 /* If EOP is set on the next entry, it's a new packet. Send it up. */ 1438 /* If EOP is set on the next entry, it's a new packet. Send it up. */
1385 while (!(rp->rx_head_desc->rx_status & cpu_to_le32(DescOwn))) { 1439 for (count = 0; count < limit; ++count) {
1386 struct rx_desc *desc = rp->rx_head_desc; 1440 struct rx_desc *desc = rp->rx_head_desc;
1387 u32 desc_status = le32_to_cpu(desc->rx_status); 1441 u32 desc_status = le32_to_cpu(desc->rx_status);
1388 int data_size = desc_status >> 16; 1442 int data_size = desc_status >> 16;
1389 1443
1444 if (desc_status & DescOwn)
1445 break;
1446
1390 if (debug > 4) 1447 if (debug > 4)
1391 printk(KERN_DEBUG "rhine_rx() status is %8.8x.\n", 1448 printk(KERN_DEBUG "rhine_rx() status is %8.8x.\n",
1392 desc_status); 1449 desc_status);
1393 if (--boguscnt < 0) 1450
1394 break;
1395 if ((desc_status & (RxWholePkt | RxErr)) != RxWholePkt) { 1451 if ((desc_status & (RxWholePkt | RxErr)) != RxWholePkt) {
1396 if ((desc_status & RxWholePkt) != RxWholePkt) { 1452 if ((desc_status & RxWholePkt) != RxWholePkt) {
1397 printk(KERN_WARNING "%s: Oversized Ethernet " 1453 printk(KERN_WARNING "%s: Oversized Ethernet "
@@ -1460,7 +1516,11 @@ static void rhine_rx(struct net_device *dev)
1460 PCI_DMA_FROMDEVICE); 1516 PCI_DMA_FROMDEVICE);
1461 } 1517 }
1462 skb->protocol = eth_type_trans(skb, dev); 1518 skb->protocol = eth_type_trans(skb, dev);
1519#ifdef CONFIG_VIA_RHINE_NAPI
1520 netif_receive_skb(skb);
1521#else
1463 netif_rx(skb); 1522 netif_rx(skb);
1523#endif
1464 dev->last_rx = jiffies; 1524 dev->last_rx = jiffies;
1465 rp->stats.rx_bytes += pkt_len; 1525 rp->stats.rx_bytes += pkt_len;
1466 rp->stats.rx_packets++; 1526 rp->stats.rx_packets++;
@@ -1487,6 +1547,8 @@ static void rhine_rx(struct net_device *dev)
1487 } 1547 }
1488 rp->rx_ring[entry].rx_status = cpu_to_le32(DescOwn); 1548 rp->rx_ring[entry].rx_status = cpu_to_le32(DescOwn);
1489 } 1549 }
1550
1551 return count;
1490} 1552}
1491 1553
1492/* 1554/*
@@ -1776,6 +1838,7 @@ static int rhine_close(struct net_device *dev)
1776 spin_lock_irq(&rp->lock); 1838 spin_lock_irq(&rp->lock);
1777 1839
1778 netif_stop_queue(dev); 1840 netif_stop_queue(dev);
1841 netif_poll_disable(dev);
1779 1842
1780 if (debug > 1) 1843 if (debug > 1)
1781 printk(KERN_DEBUG "%s: Shutting down ethercard, " 1844 printk(KERN_DEBUG "%s: Shutting down ethercard, "
@@ -1857,7 +1920,8 @@ static void rhine_shutdown (struct pci_dev *pdev)
1857 } 1920 }
1858 1921
1859 /* Hit power state D3 (sleep) */ 1922 /* Hit power state D3 (sleep) */
1860 iowrite8(ioread8(ioaddr + StickyHW) | 0x03, ioaddr + StickyHW); 1923 if (!avoid_D3)
1924 iowrite8(ioread8(ioaddr + StickyHW) | 0x03, ioaddr + StickyHW);
1861 1925
1862 /* TODO: Check use of pci_enable_wake() */ 1926 /* TODO: Check use of pci_enable_wake() */
1863 1927
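
The rhine_rx() change above is the core of the NAPI conversion: the unbounded while-loop over descriptors becomes a for-loop capped at limit, breaking out early when it meets a descriptor still owned by the NIC, and the number of frames handled is returned so rhine_napipoll() can decide whether to re-enable interrupts. Below is a self-contained mock of that loop shape; the ring, the DESC_OWN bit value, and the sample data are all made up for illustration:

/* Sketch of turning an unbounded rx loop into a bounded, count-returning one,
 * as the via-rhine NAPI patch above does.  Ring and ownership bit are mocked. */
#include <stdio.h>
#include <stdint.h>

#define RING_SIZE 8
#define DESC_OWN  0x80000000u   /* descriptor still owned by the NIC (assumed value) */

static uint32_t rx_status[RING_SIZE];   /* stands in for the descriptor ring */
static unsigned cur_rx;

static int mock_rx(int limit)
{
	int count;

	for (count = 0; count < limit; count++) {
		uint32_t status = rx_status[cur_rx % RING_SIZE];

		if (status & DESC_OWN)          /* nothing more to process */
			break;

		/* ... hand the frame to the stack here ... */
		rx_status[cur_rx % RING_SIZE] = DESC_OWN;  /* give it back to the NIC */
		cur_rx++;
	}
	return count;   /* lets the poll routine compare against the limit */
}

int main(void)
{
	rx_status[3] = DESC_OWN;            /* pretend only 3 frames are ready */
	printf("processed %d frames\n", mock_rx(16));
	return 0;
}

A return value smaller than the limit means the ring is drained, which in the patch above is the condition for netif_rx_complete() and re-enabling the receive interrupt sources.
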
diff --git a/drivers/net/wan/c101.c b/drivers/net/wan/c101.c
index 435e91ec462..6b63b350cd5 100644
--- a/drivers/net/wan/c101.c
+++ b/drivers/net/wan/c101.c
@@ -118,7 +118,7 @@ static inline void openwin(card_t *card, u8 page)
118 118
119static inline void set_carrier(port_t *port) 119static inline void set_carrier(port_t *port)
120{ 120{
121 if (!sca_in(MSCI1_OFFSET + ST3, port) & ST3_DCD) 121 if (!(sca_in(MSCI1_OFFSET + ST3, port) & ST3_DCD))
122 netif_carrier_on(port_to_dev(port)); 122 netif_carrier_on(port_to_dev(port));
123 else 123 else
124 netif_carrier_off(port_to_dev(port)); 124 netif_carrier_off(port_to_dev(port));
@@ -127,10 +127,10 @@ static inline void set_carrier(port_t *port)
127 127
128static void sca_msci_intr(port_t *port) 128static void sca_msci_intr(port_t *port)
129{ 129{
130 u8 stat = sca_in(MSCI1_OFFSET + ST1, port); /* read MSCI ST1 status */ 130 u8 stat = sca_in(MSCI0_OFFSET + ST1, port); /* read MSCI ST1 status */
131 131
132 /* Reset MSCI TX underrun status bit */ 132 /* Reset MSCI TX underrun and CDCD (ignored) status bit */
133 sca_out(stat & ST1_UDRN, MSCI0_OFFSET + ST1, port); 133 sca_out(stat & (ST1_UDRN | ST1_CDCD), MSCI0_OFFSET + ST1, port);
134 134
135 if (stat & ST1_UDRN) { 135 if (stat & ST1_UDRN) {
136 struct net_device_stats *stats = hdlc_stats(port_to_dev(port)); 136 struct net_device_stats *stats = hdlc_stats(port_to_dev(port));
@@ -138,6 +138,7 @@ static void sca_msci_intr(port_t *port)
138 stats->tx_fifo_errors++; 138 stats->tx_fifo_errors++;
139 } 139 }
140 140
141 stat = sca_in(MSCI1_OFFSET + ST1, port); /* read MSCI1 ST1 status */
141 /* Reset MSCI CDCD status bit - uses ch#2 DCD input */ 142 /* Reset MSCI CDCD status bit - uses ch#2 DCD input */
142 sca_out(stat & ST1_CDCD, MSCI1_OFFSET + ST1, port); 143 sca_out(stat & ST1_CDCD, MSCI1_OFFSET + ST1, port);
143 144
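
The set_carrier() fix in c101.c above is an operator-precedence bug: ! binds more tightly than &, so !sca_in(...) & ST3_DCD negates the whole register value first and then masks it; for any DCD bit other than bit 0 the result is 0 regardless of the register contents. A tiny demonstration, with the bit value assumed for illustration:

#include <stdio.h>

#define ST3_DCD 0x08   /* bit position is illustrative */

static void show(unsigned char st3)
{
	printf("st3=0x%02x  buggy=%d  fixed=%d\n",
	       st3,
	       !st3 & ST3_DCD,     /* ! binds first: (!st3) & 0x08, always 0 here   */
	       !(st3 & ST3_DCD));  /* intended test: is the DCD bit clear?          */
}

int main(void)
{
	show(0x28);   /* DCD bit set:   buggy=0, fixed=0                     */
	show(0x20);   /* DCD bit clear: buggy=0, fixed=1 -- results diverge  */
	return 0;
}
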
diff --git a/drivers/net/wd.c b/drivers/net/wd.c
index 7caa8dc88a5..b1ba1872f31 100644
--- a/drivers/net/wd.c
+++ b/drivers/net/wd.c
@@ -500,8 +500,8 @@ MODULE_LICENSE("GPL");
500 500
501/* This is set up so that only a single autoprobe takes place per call. 501/* This is set up so that only a single autoprobe takes place per call.
502ISA device autoprobes on a running machine are not recommended. */ 502ISA device autoprobes on a running machine are not recommended. */
503int 503
504init_module(void) 504int __init init_module(void)
505{ 505{
506 struct net_device *dev; 506 struct net_device *dev;
507 int this_dev, found = 0; 507 int this_dev, found = 0;
diff --git a/drivers/net/wireless/hostap/hostap_hw.c b/drivers/net/wireless/hostap/hostap_hw.c
index dafaa5ff5aa..d500012fdc7 100644
--- a/drivers/net/wireless/hostap/hostap_hw.c
+++ b/drivers/net/wireless/hostap/hostap_hw.c
@@ -1042,6 +1042,9 @@ static int prism2_reset_port(struct net_device *dev)
1042 dev->name, local->fragm_threshold); 1042 dev->name, local->fragm_threshold);
1043 } 1043 }
1044 1044
1045 /* Some firmwares lose antenna selection settings on reset */
1046 (void) hostap_set_antsel(local);
1047
1045 return res; 1048 return res;
1046} 1049}
1047 1050
diff --git a/drivers/net/wireless/spectrum_cs.c b/drivers/net/wireless/spectrum_cs.c
index 7f78b7801fb..bcc7038130f 100644
--- a/drivers/net/wireless/spectrum_cs.c
+++ b/drivers/net/wireless/spectrum_cs.c
@@ -242,7 +242,7 @@ spectrum_reset(struct pcmcia_device *link, int idle)
242 u_int save_cor; 242 u_int save_cor;
243 243
244 /* Doing it if hardware is gone is guaranteed crash */ 244 /* Doing it if hardware is gone is guaranteed crash */
245 if (pcmcia_dev_present(link)) 245 if (!pcmcia_dev_present(link))
246 return -ENODEV; 246 return -ENODEV;
247 247
248 /* Save original COR value */ 248 /* Save original COR value */
diff --git a/drivers/net/wireless/strip.c b/drivers/net/wireless/strip.c
index fd31885c684..ccaf28e8db0 100644
--- a/drivers/net/wireless/strip.c
+++ b/drivers/net/wireless/strip.c
@@ -467,6 +467,7 @@ static int arp_query(unsigned char *haddr, u32 paddr,
467 struct net_device *dev) 467 struct net_device *dev)
468{ 468{
469 struct neighbour *neighbor_entry; 469 struct neighbour *neighbor_entry;
470 int ret = 0;
470 471
471 neighbor_entry = neigh_lookup(&arp_tbl, &paddr, dev); 472 neighbor_entry = neigh_lookup(&arp_tbl, &paddr, dev);
472 473
@@ -474,10 +475,11 @@ static int arp_query(unsigned char *haddr, u32 paddr,
474 neighbor_entry->used = jiffies; 475 neighbor_entry->used = jiffies;
475 if (neighbor_entry->nud_state & NUD_VALID) { 476 if (neighbor_entry->nud_state & NUD_VALID) {
476 memcpy(haddr, neighbor_entry->ha, dev->addr_len); 477 memcpy(haddr, neighbor_entry->ha, dev->addr_len);
477 return 1; 478 ret = 1;
478 } 479 }
480 neigh_release(neighbor_entry);
479 } 481 }
480 return 0; 482 return ret;
481} 483}
482 484
483static void DumpData(char *msg, struct strip *strip_info, __u8 * ptr, 485static void DumpData(char *msg, struct strip *strip_info, __u8 * ptr,
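
The arp_query() fix above addresses a reference leak: neigh_lookup() hands back a reference-counted neighbour entry, and the old code returned straight out of the NUD_VALID branch without ever calling neigh_release(). Routing both outcomes through a single exit point makes the release unconditional. A generic userspace sketch of that acquire/single-exit/release shape; lookup_ref() and put_ref() are stand-ins, not kernel APIs:

#include <stdio.h>
#include <stdlib.h>

struct ref { int refcount; int valid; };

static struct ref *lookup_ref(void)
{
	struct ref *r = calloc(1, sizeof(*r));

	if (r) {
		r->refcount = 1;   /* caller holds this reference */
		r->valid = 1;
	}
	return r;
}

static void put_ref(struct ref *r)
{
	if (r && --r->refcount == 0)
		free(r);
}

static int query(char *out)
{
	struct ref *r = lookup_ref();
	int ret = 0;

	if (r) {
		if (r->valid) {
			*out = 'x';     /* copy the answer...                       */
			ret = 1;        /* ...but do NOT return from inside here... */
		}
		put_ref(r);             /* ...so the reference is always dropped    */
	}
	return ret;
}

int main(void)
{
	char c = 0;
	printf("query() = %d, out = %c\n", query(&c), c);
	return 0;
}
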
diff --git a/drivers/pci/hotplug/Kconfig b/drivers/pci/hotplug/Kconfig
index 3fae77ffb2f..8a60f391ffc 100644
--- a/drivers/pci/hotplug/Kconfig
+++ b/drivers/pci/hotplug/Kconfig
@@ -76,7 +76,7 @@ config HOTPLUG_PCI_IBM
76 76
77config HOTPLUG_PCI_ACPI 77config HOTPLUG_PCI_ACPI
78 tristate "ACPI PCI Hotplug driver" 78 tristate "ACPI PCI Hotplug driver"
79 depends on ACPI_DOCK && HOTPLUG_PCI 79 depends on (!ACPI_DOCK && ACPI && HOTPLUG_PCI) || (ACPI_DOCK && HOTPLUG_PCI)
80 help 80 help
81 Say Y here if you have a system that supports PCI Hotplug using 81 Say Y here if you have a system that supports PCI Hotplug using
82 ACPI. 82 ACPI.
@@ -153,13 +153,6 @@ config HOTPLUG_PCI_SHPC_POLL_EVENT_MODE
153 153
154 When in doubt, say N. 154 When in doubt, say N.
155 155
156config HOTPLUG_PCI_SHPC_PHPRM_LEGACY
157 bool "For AMD SHPC only: Use $HRT for resource/configuration"
158 depends on HOTPLUG_PCI_SHPC && !ACPI
159 help
160 Say Y here for AMD SHPC. You have to select this option if you are
161 using this driver on platform with AMD SHPC.
162
163config HOTPLUG_PCI_RPA 156config HOTPLUG_PCI_RPA
164 tristate "RPA PCI Hotplug driver" 157 tristate "RPA PCI Hotplug driver"
165 depends on HOTPLUG_PCI && PPC_PSERIES && PPC64 && !HOTPLUG_PCI_FAKE 158 depends on HOTPLUG_PCI && PPC_PSERIES && PPC64 && !HOTPLUG_PCI_FAKE
diff --git a/drivers/pci/hotplug/cpci_hotplug_pci.c b/drivers/pci/hotplug/cpci_hotplug_pci.c
index 02be74caa89..4afcaffd031 100644
--- a/drivers/pci/hotplug/cpci_hotplug_pci.c
+++ b/drivers/pci/hotplug/cpci_hotplug_pci.c
@@ -254,8 +254,8 @@ int cpci_led_off(struct slot* slot)
254 254
255int cpci_configure_slot(struct slot* slot) 255int cpci_configure_slot(struct slot* slot)
256{ 256{
257 unsigned char busnr; 257 struct pci_bus *parent;
258 struct pci_bus *child; 258 int fn;
259 259
260 dbg("%s - enter", __FUNCTION__); 260 dbg("%s - enter", __FUNCTION__);
261 261
@@ -276,23 +276,53 @@ int cpci_configure_slot(struct slot* slot)
276 */ 276 */
277 n = pci_scan_slot(slot->bus, slot->devfn); 277 n = pci_scan_slot(slot->bus, slot->devfn);
278 dbg("%s: pci_scan_slot returned %d", __FUNCTION__, n); 278 dbg("%s: pci_scan_slot returned %d", __FUNCTION__, n);
279 if (n > 0)
280 pci_bus_add_devices(slot->bus);
281 slot->dev = pci_get_slot(slot->bus, slot->devfn); 279 slot->dev = pci_get_slot(slot->bus, slot->devfn);
282 if (slot->dev == NULL) { 280 if (slot->dev == NULL) {
283 err("Could not find PCI device for slot %02x", slot->number); 281 err("Could not find PCI device for slot %02x", slot->number);
284 return 1; 282 return -ENODEV;
285 } 283 }
286 } 284 }
287 285 parent = slot->dev->bus;
288 if (slot->dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) { 286
289 pci_read_config_byte(slot->dev, PCI_SECONDARY_BUS, &busnr); 287 for (fn = 0; fn < 8; fn++) {
290 child = pci_add_new_bus(slot->dev->bus, slot->dev, busnr); 288 struct pci_dev *dev;
291 pci_do_scan_bus(child); 289
292 pci_bus_size_bridges(child); 290 dev = pci_get_slot(parent, PCI_DEVFN(PCI_SLOT(slot->devfn), fn));
291 if (!dev)
292 continue;
293 if ((dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) ||
294 (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)) {
295 /* Find an unused bus number for the new bridge */
296 struct pci_bus *child;
297 unsigned char busnr, start = parent->secondary;
298 unsigned char end = parent->subordinate;
299
300 for (busnr = start; busnr <= end; busnr++) {
301 if (!pci_find_bus(pci_domain_nr(parent),
302 busnr))
303 break;
304 }
305 if (busnr >= end) {
306 err("No free bus for hot-added bridge\n");
307 pci_dev_put(dev);
308 continue;
309 }
310 child = pci_add_new_bus(parent, dev, busnr);
311 if (!child) {
312 err("Cannot add new bus for %s\n",
313 pci_name(dev));
314 pci_dev_put(dev);
315 continue;
316 }
317 child->subordinate = pci_do_scan_bus(child);
318 pci_bus_size_bridges(child);
319 }
320 pci_dev_put(dev);
293 } 321 }
294 322
295 pci_bus_assign_resources(slot->dev->bus); 323 pci_bus_assign_resources(parent);
324 pci_bus_add_devices(parent);
325 pci_enable_bridges(parent);
296 326
297 dbg("%s - exit", __FUNCTION__); 327 dbg("%s - exit", __FUNCTION__);
298 return 0; 328 return 0;
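
The rewritten cpci_configure_slot() above walks all eight possible functions of the slot and, whenever it finds a bridge or CardBus header, scans the parent bus's secondary..subordinate window for a bus number that pci_find_bus() does not yet know about. The search itself is a plain linear scan; here is a standalone sketch with the bus lookup mocked by a bitmap:

/* "Find an unused bus number in the parent's window", as in the cpci hotplug
 * rewrite above.  The used[] bitmap stands in for pci_find_bus(). */
#include <stdio.h>

static int used[256];                    /* 1 = bus number already allocated */

static int find_free_bus(int start, int end)
{
	int busnr;

	for (busnr = start; busnr <= end; busnr++)
		if (!used[busnr])
			return busnr;
	return -1;                       /* no free bus for the new bridge */
}

int main(void)
{
	used[2] = used[3] = 1;           /* pretend buses 2 and 3 exist already */
	printf("first free bus in [2,10]: %d\n", find_free_bus(2, 10));
	return 0;
}
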
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
index ce89f581586..eaea9d36a1b 100644
--- a/drivers/pci/hotplug/pciehp.h
+++ b/drivers/pci/hotplug/pciehp.h
@@ -279,6 +279,11 @@ struct hpc_ops {
279 279
280 280
281#ifdef CONFIG_ACPI 281#ifdef CONFIG_ACPI
282#include <acpi/acpi.h>
283#include <acpi/acpi_bus.h>
284#include <acpi/actypes.h>
285#include <linux/pci-acpi.h>
286
282#define pciehp_get_hp_hw_control_from_firmware(dev) \ 287#define pciehp_get_hp_hw_control_from_firmware(dev) \
283 pciehp_acpi_get_hp_hw_control_from_firmware(dev) 288 pciehp_acpi_get_hp_hw_control_from_firmware(dev)
284static inline int pciehp_get_hp_params_from_firmware(struct pci_dev *dev, 289static inline int pciehp_get_hp_params_from_firmware(struct pci_dev *dev,
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 0d8fb6e607a..6ab3b6cd2b5 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -38,10 +38,6 @@
38 38
39#include "../pci.h" 39#include "../pci.h"
40#include "pciehp.h" 40#include "pciehp.h"
41#include <acpi/acpi.h>
42#include <acpi/acpi_bus.h>
43#include <acpi/actypes.h>
44#include <linux/pci-acpi.h>
45#ifdef DEBUG 41#ifdef DEBUG
46#define DBG_K_TRACE_ENTRY ((unsigned int)0x00000001) /* On function entry */ 42#define DBG_K_TRACE_ENTRY ((unsigned int)0x00000001) /* On function entry */
47#define DBG_K_TRACE_EXIT ((unsigned int)0x00000002) /* On function exit */ 43#define DBG_K_TRACE_EXIT ((unsigned int)0x00000002) /* On function exit */
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 10e1a905c14..474e9cd0e9e 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -139,9 +139,8 @@ const struct pci_device_id *pci_match_id(const struct pci_device_id *ids,
139/** 139/**
140 * pci_match_device - Tell if a PCI device structure has a matching 140 * pci_match_device - Tell if a PCI device structure has a matching
141 * PCI device id structure 141 * PCI device id structure
142 * @ids: array of PCI device id structures to search in
143 * @dev: the PCI device structure to match against
144 * @drv: the PCI driver to match against 142 * @drv: the PCI driver to match against
143 * @dev: the PCI device structure to match against
145 * 144 *
146 * Used by a driver to check whether a PCI device present in the 145 * Used by a driver to check whether a PCI device present in the
147 * system is in its list of supported devices. Returns the matching 146 * system is in its list of supported devices. Returns the matching
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index fb08bc951ac..73177429fe7 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -438,6 +438,7 @@ static void __devinit quirk_ich6_lpc_acpi(struct pci_dev *dev)
438 pci_read_config_dword(dev, 0x48, &region); 438 pci_read_config_dword(dev, 0x48, &region);
439 quirk_io_region(dev, region, 64, PCI_BRIDGE_RESOURCES+1, "ICH6 GPIO"); 439 quirk_io_region(dev, region, 64, PCI_BRIDGE_RESOURCES+1, "ICH6 GPIO");
440} 440}
441DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_0, quirk_ich6_lpc_acpi );
441DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, quirk_ich6_lpc_acpi ); 442DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, quirk_ich6_lpc_acpi );
442 443
443/* 444/*
@@ -1091,7 +1092,6 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0, asu
1091DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12, asus_hides_smbus_lpc ); 1092DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12, asus_hides_smbus_lpc );
1092DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12, asus_hides_smbus_lpc ); 1093DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12, asus_hides_smbus_lpc );
1093DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0, asus_hides_smbus_lpc ); 1094DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0, asus_hides_smbus_lpc );
1094DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, asus_hides_smbus_lpc );
1095 1095
1096static void __init asus_hides_smbus_lpc_ich6(struct pci_dev *dev) 1096static void __init asus_hides_smbus_lpc_ich6(struct pci_dev *dev)
1097{ 1097{
@@ -1518,6 +1518,63 @@ static void __devinit quirk_netmos(struct pci_dev *dev)
1518} 1518}
1519DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NETMOS, PCI_ANY_ID, quirk_netmos); 1519DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NETMOS, PCI_ANY_ID, quirk_netmos);
1520 1520
1521static void __devinit quirk_e100_interrupt(struct pci_dev *dev)
1522{
1523 u16 command;
1524 u32 bar;
1525 u8 __iomem *csr;
1526 u8 cmd_hi;
1527
1528 switch (dev->device) {
1529 /* PCI IDs taken from drivers/net/e100.c */
1530 case 0x1029:
1531 case 0x1030 ... 0x1034:
1532 case 0x1038 ... 0x103E:
1533 case 0x1050 ... 0x1057:
1534 case 0x1059:
1535 case 0x1064 ... 0x106B:
1536 case 0x1091 ... 0x1095:
1537 case 0x1209:
1538 case 0x1229:
1539 case 0x2449:
1540 case 0x2459:
1541 case 0x245D:
1542 case 0x27DC:
1543 break;
1544 default:
1545 return;
1546 }
1547
1548 /*
1549 * Some firmware hands off the e100 with interrupts enabled,
1550 * which can cause a flood of interrupts if packets are
1551 * received before the driver attaches to the device. So
1552 * disable all e100 interrupts here. The driver will
1553 * re-enable them when it's ready.
1554 */
1555 pci_read_config_word(dev, PCI_COMMAND, &command);
1556 pci_read_config_dword(dev, PCI_BASE_ADDRESS_0, &bar);
1557
1558 if (!(command & PCI_COMMAND_MEMORY) || !bar)
1559 return;
1560
1561 csr = ioremap(bar, 8);
1562 if (!csr) {
1563 printk(KERN_WARNING "PCI: Can't map %s e100 registers\n",
1564 pci_name(dev));
1565 return;
1566 }
1567
1568 cmd_hi = readb(csr + 3);
1569 if (cmd_hi == 0) {
1570 printk(KERN_WARNING "PCI: Firmware left %s e100 interrupts "
1571 "enabled, disabling\n", pci_name(dev));
1572 writeb(1, csr + 3);
1573 }
1574
1575 iounmap(csr);
1576}
1577DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_ANY_ID, quirk_e100_interrupt);
1521 1578
1522static void __devinit fixup_rev1_53c810(struct pci_dev* dev) 1579static void __devinit fixup_rev1_53c810(struct pci_dev* dev)
1523{ 1580{
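
The e100 quirk above filters on device IDs using GCC's "case low ... high" range syntax, which is a compiler extension rather than standard C. A standalone illustration using a few of the IDs from the quirk (it needs GCC or Clang to build):

#include <stdio.h>

static int is_e100_id(unsigned id)
{
	switch (id) {
	case 0x1029:
	case 0x1030 ... 0x1034:   /* GCC range extension, as used in the quirk */
	case 0x1229:
		return 1;
	default:
		return 0;
	}
}

int main(void)
{
	printf("0x1032 -> %d, 0x1040 -> %d\n", is_e100_id(0x1032), is_e100_id(0x1040));
	return 0;
}
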
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c
index d6d1bff52b8..2c7de79c83b 100644
--- a/drivers/rtc/rtc-s3c.c
+++ b/drivers/rtc/rtc-s3c.c
@@ -69,12 +69,12 @@ static void s3c_rtc_setaie(int to)
69 69
70 pr_debug("%s: aie=%d\n", __FUNCTION__, to); 70 pr_debug("%s: aie=%d\n", __FUNCTION__, to);
71 71
72 tmp = readb(S3C2410_RTCALM) & ~S3C2410_RTCALM_ALMEN; 72 tmp = readb(s3c_rtc_base + S3C2410_RTCALM) & ~S3C2410_RTCALM_ALMEN;
73 73
74 if (to) 74 if (to)
75 tmp |= S3C2410_RTCALM_ALMEN; 75 tmp |= S3C2410_RTCALM_ALMEN;
76 76
77 writeb(tmp, S3C2410_RTCALM); 77 writeb(tmp, s3c_rtc_base + S3C2410_RTCALM);
78} 78}
79 79
80static void s3c_rtc_setpie(int to) 80static void s3c_rtc_setpie(int to)
@@ -84,12 +84,12 @@ static void s3c_rtc_setpie(int to)
84 pr_debug("%s: pie=%d\n", __FUNCTION__, to); 84 pr_debug("%s: pie=%d\n", __FUNCTION__, to);
85 85
86 spin_lock_irq(&s3c_rtc_pie_lock); 86 spin_lock_irq(&s3c_rtc_pie_lock);
87 tmp = readb(S3C2410_TICNT) & ~S3C2410_TICNT_ENABLE; 87 tmp = readb(s3c_rtc_base + S3C2410_TICNT) & ~S3C2410_TICNT_ENABLE;
88 88
89 if (to) 89 if (to)
90 tmp |= S3C2410_TICNT_ENABLE; 90 tmp |= S3C2410_TICNT_ENABLE;
91 91
92 writeb(tmp, S3C2410_TICNT); 92 writeb(tmp, s3c_rtc_base + S3C2410_TICNT);
93 spin_unlock_irq(&s3c_rtc_pie_lock); 93 spin_unlock_irq(&s3c_rtc_pie_lock);
94} 94}
95 95
@@ -98,13 +98,13 @@ static void s3c_rtc_setfreq(int freq)
98 unsigned int tmp; 98 unsigned int tmp;
99 99
100 spin_lock_irq(&s3c_rtc_pie_lock); 100 spin_lock_irq(&s3c_rtc_pie_lock);
101 tmp = readb(S3C2410_TICNT) & S3C2410_TICNT_ENABLE; 101 tmp = readb(s3c_rtc_base + S3C2410_TICNT) & S3C2410_TICNT_ENABLE;
102 102
103 s3c_rtc_freq = freq; 103 s3c_rtc_freq = freq;
104 104
105 tmp |= (128 / freq)-1; 105 tmp |= (128 / freq)-1;
106 106
107 writeb(tmp, S3C2410_TICNT); 107 writeb(tmp, s3c_rtc_base + S3C2410_TICNT);
108 spin_unlock_irq(&s3c_rtc_pie_lock); 108 spin_unlock_irq(&s3c_rtc_pie_lock);
109} 109}
110 110
@@ -113,14 +113,15 @@ static void s3c_rtc_setfreq(int freq)
113static int s3c_rtc_gettime(struct device *dev, struct rtc_time *rtc_tm) 113static int s3c_rtc_gettime(struct device *dev, struct rtc_time *rtc_tm)
114{ 114{
115 unsigned int have_retried = 0; 115 unsigned int have_retried = 0;
116 void __iomem *base = s3c_rtc_base;
116 117
117 retry_get_time: 118 retry_get_time:
118 rtc_tm->tm_min = readb(S3C2410_RTCMIN); 119 rtc_tm->tm_min = readb(base + S3C2410_RTCMIN);
119 rtc_tm->tm_hour = readb(S3C2410_RTCHOUR); 120 rtc_tm->tm_hour = readb(base + S3C2410_RTCHOUR);
120 rtc_tm->tm_mday = readb(S3C2410_RTCDATE); 121 rtc_tm->tm_mday = readb(base + S3C2410_RTCDATE);
121 rtc_tm->tm_mon = readb(S3C2410_RTCMON); 122 rtc_tm->tm_mon = readb(base + S3C2410_RTCMON);
122 rtc_tm->tm_year = readb(S3C2410_RTCYEAR); 123 rtc_tm->tm_year = readb(base + S3C2410_RTCYEAR);
123 rtc_tm->tm_sec = readb(S3C2410_RTCSEC); 124 rtc_tm->tm_sec = readb(base + S3C2410_RTCSEC);
124 125
125 /* the only way to work out wether the system was mid-update 126 /* the only way to work out wether the system was mid-update
126 * when we read it is to check the second counter, and if it 127 * when we read it is to check the second counter, and if it
@@ -151,17 +152,26 @@ static int s3c_rtc_gettime(struct device *dev, struct rtc_time *rtc_tm)
151 152
152static int s3c_rtc_settime(struct device *dev, struct rtc_time *tm) 153static int s3c_rtc_settime(struct device *dev, struct rtc_time *tm)
153{ 154{
154 /* the rtc gets round the y2k problem by just not supporting it */ 155 void __iomem *base = s3c_rtc_base;
156 int year = tm->tm_year - 100;
155 157
156 if (tm->tm_year < 100) 158 pr_debug("set time %02d.%02d.%02d %02d/%02d/%02d\n",
159 tm->tm_year, tm->tm_mon, tm->tm_mday,
160 tm->tm_hour, tm->tm_min, tm->tm_sec);
161
162 /* we get around y2k by simply not supporting it */
163
164 if (year < 0 || year >= 100) {
165 dev_err(dev, "rtc only supports 100 years\n");
157 return -EINVAL; 166 return -EINVAL;
167 }
158 168
159 writeb(BIN2BCD(tm->tm_sec), S3C2410_RTCSEC); 169 writeb(BIN2BCD(tm->tm_sec), base + S3C2410_RTCSEC);
160 writeb(BIN2BCD(tm->tm_min), S3C2410_RTCMIN); 170 writeb(BIN2BCD(tm->tm_min), base + S3C2410_RTCMIN);
161 writeb(BIN2BCD(tm->tm_hour), S3C2410_RTCHOUR); 171 writeb(BIN2BCD(tm->tm_hour), base + S3C2410_RTCHOUR);
162 writeb(BIN2BCD(tm->tm_mday), S3C2410_RTCDATE); 172 writeb(BIN2BCD(tm->tm_mday), base + S3C2410_RTCDATE);
163 writeb(BIN2BCD(tm->tm_mon + 1), S3C2410_RTCMON); 173 writeb(BIN2BCD(tm->tm_mon + 1), base + S3C2410_RTCMON);
164 writeb(BIN2BCD(tm->tm_year - 100), S3C2410_RTCYEAR); 174 writeb(BIN2BCD(year), base + S3C2410_RTCYEAR);
165 175
166 return 0; 176 return 0;
167} 177}
@@ -169,16 +179,17 @@ static int s3c_rtc_settime(struct device *dev, struct rtc_time *tm)
169static int s3c_rtc_getalarm(struct device *dev, struct rtc_wkalrm *alrm) 179static int s3c_rtc_getalarm(struct device *dev, struct rtc_wkalrm *alrm)
170{ 180{
171 struct rtc_time *alm_tm = &alrm->time; 181 struct rtc_time *alm_tm = &alrm->time;
182 void __iomem *base = s3c_rtc_base;
172 unsigned int alm_en; 183 unsigned int alm_en;
173 184
174 alm_tm->tm_sec = readb(S3C2410_ALMSEC); 185 alm_tm->tm_sec = readb(base + S3C2410_ALMSEC);
175 alm_tm->tm_min = readb(S3C2410_ALMMIN); 186 alm_tm->tm_min = readb(base + S3C2410_ALMMIN);
176 alm_tm->tm_hour = readb(S3C2410_ALMHOUR); 187 alm_tm->tm_hour = readb(base + S3C2410_ALMHOUR);
177 alm_tm->tm_mon = readb(S3C2410_ALMMON); 188 alm_tm->tm_mon = readb(base + S3C2410_ALMMON);
178 alm_tm->tm_mday = readb(S3C2410_ALMDATE); 189 alm_tm->tm_mday = readb(base + S3C2410_ALMDATE);
179 alm_tm->tm_year = readb(S3C2410_ALMYEAR); 190 alm_tm->tm_year = readb(base + S3C2410_ALMYEAR);
180 191
181 alm_en = readb(S3C2410_RTCALM); 192 alm_en = readb(base + S3C2410_RTCALM);
182 193
183 pr_debug("read alarm %02x %02x.%02x.%02x %02x/%02x/%02x\n", 194 pr_debug("read alarm %02x %02x.%02x.%02x %02x/%02x/%02x\n",
184 alm_en, 195 alm_en,
@@ -226,6 +237,7 @@ static int s3c_rtc_getalarm(struct device *dev, struct rtc_wkalrm *alrm)
226static int s3c_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm) 237static int s3c_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
227{ 238{
228 struct rtc_time *tm = &alrm->time; 239 struct rtc_time *tm = &alrm->time;
240 void __iomem *base = s3c_rtc_base;
229 unsigned int alrm_en; 241 unsigned int alrm_en;
230 242
231 pr_debug("s3c_rtc_setalarm: %d, %02x/%02x/%02x %02x.%02x.%02x\n", 243 pr_debug("s3c_rtc_setalarm: %d, %02x/%02x/%02x %02x.%02x.%02x\n",
@@ -234,32 +246,32 @@ static int s3c_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
234 tm->tm_hour & 0xff, tm->tm_min & 0xff, tm->tm_sec); 246 tm->tm_hour & 0xff, tm->tm_min & 0xff, tm->tm_sec);
235 247
236 248
237 alrm_en = readb(S3C2410_RTCALM) & S3C2410_RTCALM_ALMEN; 249 alrm_en = readb(base + S3C2410_RTCALM) & S3C2410_RTCALM_ALMEN;
238 writeb(0x00, S3C2410_RTCALM); 250 writeb(0x00, base + S3C2410_RTCALM);
239 251
240 if (tm->tm_sec < 60 && tm->tm_sec >= 0) { 252 if (tm->tm_sec < 60 && tm->tm_sec >= 0) {
241 alrm_en |= S3C2410_RTCALM_SECEN; 253 alrm_en |= S3C2410_RTCALM_SECEN;
242 writeb(BIN2BCD(tm->tm_sec), S3C2410_ALMSEC); 254 writeb(BIN2BCD(tm->tm_sec), base + S3C2410_ALMSEC);
243 } 255 }
244 256
245 if (tm->tm_min < 60 && tm->tm_min >= 0) { 257 if (tm->tm_min < 60 && tm->tm_min >= 0) {
246 alrm_en |= S3C2410_RTCALM_MINEN; 258 alrm_en |= S3C2410_RTCALM_MINEN;
247 writeb(BIN2BCD(tm->tm_min), S3C2410_ALMMIN); 259 writeb(BIN2BCD(tm->tm_min), base + S3C2410_ALMMIN);
248 } 260 }
249 261
250 if (tm->tm_hour < 24 && tm->tm_hour >= 0) { 262 if (tm->tm_hour < 24 && tm->tm_hour >= 0) {
251 alrm_en |= S3C2410_RTCALM_HOUREN; 263 alrm_en |= S3C2410_RTCALM_HOUREN;
252 writeb(BIN2BCD(tm->tm_hour), S3C2410_ALMHOUR); 264 writeb(BIN2BCD(tm->tm_hour), base + S3C2410_ALMHOUR);
253 } 265 }
254 266
255 pr_debug("setting S3C2410_RTCALM to %08x\n", alrm_en); 267 pr_debug("setting S3C2410_RTCALM to %08x\n", alrm_en);
256 268
257 writeb(alrm_en, S3C2410_RTCALM); 269 writeb(alrm_en, base + S3C2410_RTCALM);
258 270
259 if (0) { 271 if (0) {
260 alrm_en = readb(S3C2410_RTCALM); 272 alrm_en = readb(base + S3C2410_RTCALM);
261 alrm_en &= ~S3C2410_RTCALM_ALMEN; 273 alrm_en &= ~S3C2410_RTCALM_ALMEN;
262 writeb(alrm_en, S3C2410_RTCALM); 274 writeb(alrm_en, base + S3C2410_RTCALM);
263 disable_irq_wake(s3c_rtc_alarmno); 275 disable_irq_wake(s3c_rtc_alarmno);
264 } 276 }
265 277
@@ -319,8 +331,8 @@ static int s3c_rtc_ioctl(struct device *dev,
319 331
320static int s3c_rtc_proc(struct device *dev, struct seq_file *seq) 332static int s3c_rtc_proc(struct device *dev, struct seq_file *seq)
321{ 333{
322 unsigned int rtcalm = readb(S3C2410_RTCALM); 334 unsigned int rtcalm = readb(s3c_rtc_base + S3C2410_RTCALM);
323 unsigned int ticnt = readb (S3C2410_TICNT); 335 unsigned int ticnt = readb(s3c_rtc_base + S3C2410_TICNT);
324 336
325 seq_printf(seq, "alarm_IRQ\t: %s\n", 337 seq_printf(seq, "alarm_IRQ\t: %s\n",
326 (rtcalm & S3C2410_RTCALM_ALMEN) ? "yes" : "no" ); 338 (rtcalm & S3C2410_RTCALM_ALMEN) ? "yes" : "no" );
@@ -387,39 +399,40 @@ static struct rtc_class_ops s3c_rtcops = {
387 399
388static void s3c_rtc_enable(struct platform_device *pdev, int en) 400static void s3c_rtc_enable(struct platform_device *pdev, int en)
389{ 401{
402 void __iomem *base = s3c_rtc_base;
390 unsigned int tmp; 403 unsigned int tmp;
391 404
392 if (s3c_rtc_base == NULL) 405 if (s3c_rtc_base == NULL)
393 return; 406 return;
394 407
395 if (!en) { 408 if (!en) {
396 tmp = readb(S3C2410_RTCCON); 409 tmp = readb(base + S3C2410_RTCCON);
397 writeb(tmp & ~S3C2410_RTCCON_RTCEN, S3C2410_RTCCON); 410 writeb(tmp & ~S3C2410_RTCCON_RTCEN, base + S3C2410_RTCCON);
398 411
399 tmp = readb(S3C2410_TICNT); 412 tmp = readb(base + S3C2410_TICNT);
400 writeb(tmp & ~S3C2410_TICNT_ENABLE, S3C2410_TICNT); 413 writeb(tmp & ~S3C2410_TICNT_ENABLE, base + S3C2410_TICNT);
401 } else { 414 } else {
402 /* re-enable the device, and check it is ok */ 415 /* re-enable the device, and check it is ok */
403 416
404 if ((readb(S3C2410_RTCCON) & S3C2410_RTCCON_RTCEN) == 0){ 417 if ((readb(base+S3C2410_RTCCON) & S3C2410_RTCCON_RTCEN) == 0){
405 dev_info(&pdev->dev, "rtc disabled, re-enabling\n"); 418 dev_info(&pdev->dev, "rtc disabled, re-enabling\n");
406 419
407 tmp = readb(S3C2410_RTCCON); 420 tmp = readb(base + S3C2410_RTCCON);
408 writeb(tmp | S3C2410_RTCCON_RTCEN , S3C2410_RTCCON); 421 writeb(tmp|S3C2410_RTCCON_RTCEN, base+S3C2410_RTCCON);
409 } 422 }
410 423
411 if ((readb(S3C2410_RTCCON) & S3C2410_RTCCON_CNTSEL)){ 424 if ((readb(base + S3C2410_RTCCON) & S3C2410_RTCCON_CNTSEL)){
412 dev_info(&pdev->dev, "removing RTCCON_CNTSEL\n"); 425 dev_info(&pdev->dev, "removing RTCCON_CNTSEL\n");
413 426
414 tmp = readb(S3C2410_RTCCON); 427 tmp = readb(base + S3C2410_RTCCON);
415 writeb(tmp& ~S3C2410_RTCCON_CNTSEL , S3C2410_RTCCON); 428 writeb(tmp& ~S3C2410_RTCCON_CNTSEL, base+S3C2410_RTCCON);
416 } 429 }
417 430
418 if ((readb(S3C2410_RTCCON) & S3C2410_RTCCON_CLKRST)){ 431 if ((readb(base + S3C2410_RTCCON) & S3C2410_RTCCON_CLKRST)){
419 dev_info(&pdev->dev, "removing RTCCON_CLKRST\n"); 432 dev_info(&pdev->dev, "removing RTCCON_CLKRST\n");
420 433
421 tmp = readb(S3C2410_RTCCON); 434 tmp = readb(base + S3C2410_RTCCON);
422 writeb(tmp & ~S3C2410_RTCCON_CLKRST, S3C2410_RTCCON); 435 writeb(tmp & ~S3C2410_RTCCON_CLKRST, base+S3C2410_RTCCON);
423 } 436 }
424 } 437 }
425} 438}
@@ -475,8 +488,8 @@ static int s3c_rtc_probe(struct platform_device *pdev)
475 } 488 }
476 489
477 s3c_rtc_mem = request_mem_region(res->start, 490 s3c_rtc_mem = request_mem_region(res->start,
478 res->end-res->start+1, 491 res->end-res->start+1,
479 pdev->name); 492 pdev->name);
480 493
481 if (s3c_rtc_mem == NULL) { 494 if (s3c_rtc_mem == NULL) {
482 dev_err(&pdev->dev, "failed to reserve memory region\n"); 495 dev_err(&pdev->dev, "failed to reserve memory region\n");
@@ -495,7 +508,8 @@ static int s3c_rtc_probe(struct platform_device *pdev)
495 508
496 s3c_rtc_enable(pdev, 1); 509 s3c_rtc_enable(pdev, 1);
497 510
498 pr_debug("s3c2410_rtc: RTCCON=%02x\n", readb(S3C2410_RTCCON)); 511 pr_debug("s3c2410_rtc: RTCCON=%02x\n",
512 readb(s3c_rtc_base + S3C2410_RTCCON));
499 513
500 s3c_rtc_setfreq(s3c_rtc_freq); 514 s3c_rtc_setfreq(s3c_rtc_freq);
501 515
@@ -543,7 +557,7 @@ static int s3c_rtc_suspend(struct platform_device *pdev, pm_message_t state)
543 557
544 /* save TICNT for anyone using periodic interrupts */ 558 /* save TICNT for anyone using periodic interrupts */
545 559
546 ticnt_save = readb(S3C2410_TICNT); 560 ticnt_save = readb(s3c_rtc_base + S3C2410_TICNT);
547 561
548 /* calculate time delta for suspend */ 562 /* calculate time delta for suspend */
549 563
@@ -567,7 +581,7 @@ static int s3c_rtc_resume(struct platform_device *pdev)
567 rtc_tm_to_time(&tm, &time.tv_sec); 581 rtc_tm_to_time(&tm, &time.tv_sec);
568 restore_time_delta(&s3c_rtc_delta, &time); 582 restore_time_delta(&s3c_rtc_delta, &time);
569 583
570 writeb(ticnt_save, S3C2410_TICNT); 584 writeb(ticnt_save, s3c_rtc_base + S3C2410_TICNT);
571 return 0; 585 return 0;
572} 586}
573#else 587#else
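
The s3c RTC stores each time field in BCD, which is why the driver wraps every value in BIN2BCD() before writing it to the register file. A quick standalone check of the round trip; the macros are written out locally here, under the assumption that they follow the usual nibble-per-digit encoding:

#include <stdio.h>

/* The usual BCD helpers, spelled out locally for the demo. */
#define BIN2BCD(v) ((((v) / 10) << 4) | ((v) % 10))
#define BCD2BIN(v) ((((v) >> 4) * 10) + ((v) & 0x0f))

int main(void)
{
	int sec = 59;

	printf("59 decimal -> 0x%02x BCD -> %d decimal\n",
	       BIN2BCD(sec), BCD2BIN(BIN2BCD(sec)));
	return 0;
}
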
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 4bf03fb67f8..d8e9b95f0a1 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -1730,8 +1730,8 @@ dasd_flush_request_queue(struct dasd_device * device)
1730 req = elv_next_request(device->request_queue); 1730 req = elv_next_request(device->request_queue);
1731 if (req == NULL) 1731 if (req == NULL)
1732 break; 1732 break;
1733 dasd_end_request(req, 0);
1734 blkdev_dequeue_request(req); 1733 blkdev_dequeue_request(req);
1734 dasd_end_request(req, 0);
1735 } 1735 }
1736 spin_unlock_irq(&device->request_queue_lock); 1736 spin_unlock_irq(&device->request_queue_lock);
1737} 1737}
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
index 7f6fdac7470..9af02c79ce8 100644
--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
@@ -48,18 +48,20 @@ struct dasd_devmap {
48}; 48};
49 49
50/* 50/*
51 * dasd_servermap is used to store the server_id of all storage servers 51 * dasd_server_ssid_map contains a globally unique storage server subsystem ID.
52 * accessed by DASD device driver. 52 * dasd_server_ssid_list contains the list of all subsystem IDs accessed by
53 * the DASD device driver.
53 */ 54 */
54struct dasd_servermap { 55struct dasd_server_ssid_map {
55 struct list_head list; 56 struct list_head list;
56 struct server_id { 57 struct system_id {
57 char vendor[4]; 58 char vendor[4];
58 char serial[15]; 59 char serial[15];
60 __u16 ssid;
59 } sid; 61 } sid;
60}; 62};
61 63
62static struct list_head dasd_serverlist; 64static struct list_head dasd_server_ssid_list;
63 65
64/* 66/*
65 * Parameter parsing functions for dasd= parameter. The syntax is: 67 * Parameter parsing functions for dasd= parameter. The syntax is:
@@ -89,7 +91,7 @@ static char *dasd[256];
89module_param_array(dasd, charp, NULL, 0); 91module_param_array(dasd, charp, NULL, 0);
90 92
91/* 93/*
92 * Single spinlock to protect devmap structures and lists. 94 * Single spinlock to protect devmap and servermap structures and lists.
93 */ 95 */
94static DEFINE_SPINLOCK(dasd_devmap_lock); 96static DEFINE_SPINLOCK(dasd_devmap_lock);
95 97
@@ -264,8 +266,9 @@ dasd_parse_keyword( char *parsestring ) {
264 if (dasd_page_cache) 266 if (dasd_page_cache)
265 return residual_str; 267 return residual_str;
266 dasd_page_cache = 268 dasd_page_cache =
267 kmem_cache_create("dasd_page_cache", PAGE_SIZE, 0, 269 kmem_cache_create("dasd_page_cache", PAGE_SIZE,
268 SLAB_CACHE_DMA, NULL, NULL ); 270 PAGE_SIZE, SLAB_CACHE_DMA,
271 NULL, NULL );
269 if (!dasd_page_cache) 272 if (!dasd_page_cache)
270 MESSAGE(KERN_WARNING, "%s", "Failed to create slab, " 273 MESSAGE(KERN_WARNING, "%s", "Failed to create slab, "
271 "fixed buffer mode disabled."); 274 "fixed buffer mode disabled.");
@@ -859,39 +862,6 @@ static struct attribute_group dasd_attr_group = {
859}; 862};
860 863
861/* 864/*
862 * Check if the related storage server is already contained in the
863 * dasd_serverlist. If server is not contained, create new entry.
864 * Return 0 if server was already in serverlist,
865 * 1 if the server was added successfully
866 * <0 in case of error.
867 */
868static int
869dasd_add_server(struct dasd_uid *uid)
870{
871 struct dasd_servermap *new, *tmp;
872
873 /* check if server is already contained */
874 list_for_each_entry(tmp, &dasd_serverlist, list)
875 // normale cmp?
876 if (strncmp(tmp->sid.vendor, uid->vendor,
877 sizeof(tmp->sid.vendor)) == 0
878 && strncmp(tmp->sid.serial, uid->serial,
879 sizeof(tmp->sid.serial)) == 0)
880 return 0;
881
882 new = (struct dasd_servermap *)
883 kzalloc(sizeof(struct dasd_servermap), GFP_KERNEL);
884 if (!new)
885 return -ENOMEM;
886
887 strncpy(new->sid.vendor, uid->vendor, sizeof(new->sid.vendor));
888 strncpy(new->sid.serial, uid->serial, sizeof(new->sid.serial));
889 list_add(&new->list, &dasd_serverlist);
890 return 1;
891}
892
893
894/*
895 * Return copy of the device unique identifier. 865 * Return copy of the device unique identifier.
896 */ 866 */
897int 867int
@@ -910,6 +880,9 @@ dasd_get_uid(struct ccw_device *cdev, struct dasd_uid *uid)
910 880
911/* 881/*
912 * Register the given device unique identifier into devmap struct. 882 * Register the given device unique identifier into devmap struct.
883 * In addition check if the related storage server subsystem ID is already
884 * contained in the dasd_server_ssid_list. If subsystem ID is not contained,
885 * create new entry.
913 * Return 0 if server was already in serverlist, 886 * Return 0 if server was already in serverlist,
914 * 1 if the server was added successful 887 * 1 if the server was added successful
915 * <0 in case of error. 888 * <0 in case of error.
@@ -918,16 +891,39 @@ int
918dasd_set_uid(struct ccw_device *cdev, struct dasd_uid *uid) 891dasd_set_uid(struct ccw_device *cdev, struct dasd_uid *uid)
919{ 892{
920 struct dasd_devmap *devmap; 893 struct dasd_devmap *devmap;
921 int rc; 894 struct dasd_server_ssid_map *srv, *tmp;
922 895
923 devmap = dasd_find_busid(cdev->dev.bus_id); 896 devmap = dasd_find_busid(cdev->dev.bus_id);
924 if (IS_ERR(devmap)) 897 if (IS_ERR(devmap))
925 return PTR_ERR(devmap); 898 return PTR_ERR(devmap);
899
900 /* generate entry for server_ssid_map */
901 srv = (struct dasd_server_ssid_map *)
902 kzalloc(sizeof(struct dasd_server_ssid_map), GFP_KERNEL);
903 if (!srv)
904 return -ENOMEM;
905 strncpy(srv->sid.vendor, uid->vendor, sizeof(srv->sid.vendor) - 1);
906 strncpy(srv->sid.serial, uid->serial, sizeof(srv->sid.serial) - 1);
907 srv->sid.ssid = uid->ssid;
908
909 /* server is already contained ? */
926 spin_lock(&dasd_devmap_lock); 910 spin_lock(&dasd_devmap_lock);
927 devmap->uid = *uid; 911 devmap->uid = *uid;
928 rc = dasd_add_server(uid); 912 list_for_each_entry(tmp, &dasd_server_ssid_list, list) {
913 if (!memcmp(&srv->sid, &tmp->sid,
914 sizeof(struct system_id))) {
915 kfree(srv);
916 srv = NULL;
917 break;
918 }
919 }
920
921 /* add servermap to serverlist */
922 if (srv)
923 list_add(&srv->list, &dasd_server_ssid_list);
929 spin_unlock(&dasd_devmap_lock); 924 spin_unlock(&dasd_devmap_lock);
930 return rc; 925
926 return (srv ? 1 : 0);
931} 927}
932EXPORT_SYMBOL_GPL(dasd_set_uid); 928EXPORT_SYMBOL_GPL(dasd_set_uid);
933 929
@@ -995,7 +991,7 @@ dasd_devmap_init(void)
995 INIT_LIST_HEAD(&dasd_hashlists[i]); 991 INIT_LIST_HEAD(&dasd_hashlists[i]);
996 992
997 /* Initialize servermap structure. */ 993 /* Initialize servermap structure. */
998 INIT_LIST_HEAD(&dasd_serverlist); 994 INIT_LIST_HEAD(&dasd_server_ssid_list);
999 return 0; 995 return 0;
1000} 996}
1001 997
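
The dasd_set_uid() rewrite above allocates the candidate dasd_server_ssid_map entry with GFP_KERNEL before taking dasd_devmap_lock, because a spinlock must not be held across a sleeping allocation; under the lock it then either links the entry or discards it when an equal one already exists (kfree itself is safe under a spinlock). A userspace sketch of the same allocate-outside-the-lock, insert-or-discard-under-the-lock shape, transplanted onto a pthread mutex with a made-up key type:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct entry {
	struct entry *next;
	char key[16];
};

static struct entry *head;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Returns 1 if the key was newly added, 0 if it already existed, -1 on OOM. */
static int add_unique(const char *key)
{
	struct entry *new = calloc(1, sizeof(*new));   /* done before taking the lock */
	struct entry *tmp;

	if (!new)
		return -1;
	strncpy(new->key, key, sizeof(new->key) - 1);

	pthread_mutex_lock(&lock);
	for (tmp = head; tmp; tmp = tmp->next) {
		if (!strcmp(tmp->key, key)) {          /* already present: discard */
			free(new);
			new = NULL;
			break;
		}
	}
	if (new) {                                     /* not found: link it in */
		new->next = head;
		head = new;
	}
	pthread_mutex_unlock(&lock);

	return new ? 1 : 0;
}

int main(void)
{
	printf("%d %d\n", add_unique("server-A"), add_unique("server-A"));
	return 0;
}
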
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 39c2281371b..b7a7fac3f7c 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -468,11 +468,11 @@ dasd_eckd_generate_uid(struct dasd_device *device, struct dasd_uid *uid)
468 return -ENODEV; 468 return -ENODEV;
469 469
470 memset(uid, 0, sizeof(struct dasd_uid)); 470 memset(uid, 0, sizeof(struct dasd_uid));
471 strncpy(uid->vendor, confdata->ned1.HDA_manufacturer, 471 memcpy(uid->vendor, confdata->ned1.HDA_manufacturer,
472 sizeof(uid->vendor) - 1); 472 sizeof(uid->vendor) - 1);
473 EBCASC(uid->vendor, sizeof(uid->vendor) - 1); 473 EBCASC(uid->vendor, sizeof(uid->vendor) - 1);
474 strncpy(uid->serial, confdata->ned1.HDA_location, 474 memcpy(uid->serial, confdata->ned1.HDA_location,
475 sizeof(uid->serial) - 1); 475 sizeof(uid->serial) - 1);
476 EBCASC(uid->serial, sizeof(uid->serial) - 1); 476 EBCASC(uid->serial, sizeof(uid->serial) - 1);
477 uid->ssid = confdata->neq.subsystemID; 477 uid->ssid = confdata->neq.subsystemID;
478 if (confdata->ned2.sneq.flags == 0x40) { 478 if (confdata->ned2.sneq.flags == 0x40) {
@@ -607,7 +607,7 @@ dasd_eckd_psf_ssc(struct dasd_device *device)
607 * Validate storage server of current device. 607 * Validate storage server of current device.
608 */ 608 */
609static int 609static int
610dasd_eckd_validate_server(struct dasd_device *device) 610dasd_eckd_validate_server(struct dasd_device *device, struct dasd_uid *uid)
611{ 611{
612 int rc; 612 int rc;
613 613
@@ -616,11 +616,11 @@ dasd_eckd_validate_server(struct dasd_device *device)
616 return 0; 616 return 0;
617 617
618 rc = dasd_eckd_psf_ssc(device); 618 rc = dasd_eckd_psf_ssc(device);
619 if (rc) 619 /* may be requested feature is not available on server,
620 /* may be requested feature is not available on server, 620 * therefore just report error and go ahead */
621 * therefore just report error and go ahead */ 621 DEV_MESSAGE(KERN_INFO, device,
622 DEV_MESSAGE(KERN_INFO, device, 622 "PSF-SSC on storage subsystem %s.%s.%04x returned rc=%d",
623 "Perform Subsystem Function returned rc=%d", rc); 623 uid->vendor, uid->serial, uid->ssid, rc);
624 /* RE-Read Configuration Data */ 624 /* RE-Read Configuration Data */
625 return dasd_eckd_read_conf(device); 625 return dasd_eckd_read_conf(device);
626} 626}
@@ -666,7 +666,7 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
666 return rc; 666 return rc;
667 rc = dasd_set_uid(device->cdev, &uid); 667 rc = dasd_set_uid(device->cdev, &uid);
668 if (rc == 1) /* new server found */ 668 if (rc == 1) /* new server found */
669 rc = dasd_eckd_validate_server(device); 669 rc = dasd_eckd_validate_server(device, &uid);
670 if (rc) 670 if (rc)
671 return rc; 671 return rc;
672 672
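
The strncpy()-to-memcpy() switch in dasd_eckd_generate_uid() above matters because the configuration-data fields are fixed-width EBCDIC data rather than NUL-terminated strings: strncpy stops copying payload at the first zero byte, so a stray zero would silently truncate the vendor or serial field. A small demonstration with made-up field contents:

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* Fixed-width source field, NOT NUL-terminated, with a zero byte inside. */
	const char field[8] = { 'I', 'B', 'M', 0x00, 0x37, 0x39, 0x33, 0x31 };
	char a[9] = { 0 }, b[9] = { 0 };

	strncpy(a, field, 8);   /* stops copying payload at the embedded 0 */
	memcpy(b, field, 8);    /* copies the whole fixed-width field      */

	printf("strncpy copied %zu payload bytes, memcpy kept byte 4 = 0x%02x\n",
	       strlen(a), (unsigned char)b[4]);
	return 0;
}
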
diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c
index 1140302ff11..ca7d51f7ecc 100644
--- a/drivers/s390/block/xpram.c
+++ b/drivers/s390/block/xpram.c
@@ -48,15 +48,6 @@
48#define PRINT_ERR(x...) printk(KERN_ERR XPRAM_NAME " error:" x) 48#define PRINT_ERR(x...) printk(KERN_ERR XPRAM_NAME " error:" x)
49 49
50 50
51static struct sysdev_class xpram_sysclass = {
52 set_kset_name("xpram"),
53};
54
55static struct sys_device xpram_sys_device = {
56 .id = 0,
57 .cls = &xpram_sysclass,
58};
59
60typedef struct { 51typedef struct {
61 unsigned int size; /* size of xpram segment in pages */ 52 unsigned int size; /* size of xpram segment in pages */
62 unsigned int offset; /* start page of xpram segment */ 53 unsigned int offset; /* start page of xpram segment */
@@ -451,8 +442,6 @@ static void __exit xpram_exit(void)
451 } 442 }
452 unregister_blkdev(XPRAM_MAJOR, XPRAM_NAME); 443 unregister_blkdev(XPRAM_MAJOR, XPRAM_NAME);
453 blk_cleanup_queue(xpram_queue); 444 blk_cleanup_queue(xpram_queue);
454 sysdev_unregister(&xpram_sys_device);
455 sysdev_class_unregister(&xpram_sysclass);
456} 445}
457 446
458static int __init xpram_init(void) 447static int __init xpram_init(void)
@@ -470,19 +459,7 @@ static int __init xpram_init(void)
470 rc = xpram_setup_sizes(xpram_pages); 459 rc = xpram_setup_sizes(xpram_pages);
471 if (rc) 460 if (rc)
472 return rc; 461 return rc;
473 rc = sysdev_class_register(&xpram_sysclass); 462 return xpram_setup_blkdev();
474 if (rc)
475 return rc;
476
477 rc = sysdev_register(&xpram_sys_device);
478 if (rc) {
479 sysdev_class_unregister(&xpram_sysclass);
480 return rc;
481 }
482 rc = xpram_setup_blkdev();
483 if (rc)
484 sysdev_unregister(&xpram_sys_device);
485 return rc;
486} 463}
487 464
488module_init(xpram_init); 465module_init(xpram_init);
diff --git a/drivers/s390/char/tape_class.c b/drivers/s390/char/tape_class.c
index 643b6d07856..56b87618b10 100644
--- a/drivers/s390/char/tape_class.c
+++ b/drivers/s390/char/tape_class.c
@@ -76,7 +76,7 @@ struct tape_class_device *register_tape_dev(
76 device, 76 device,
77 "%s", tcd->device_name 77 "%s", tcd->device_name
78 ); 78 );
79 rc = PTR_ERR(tcd->class_device); 79 rc = IS_ERR(tcd->class_device) ? PTR_ERR(tcd->class_device) : 0;
80 if (rc) 80 if (rc)
81 goto fail_with_cdev; 81 goto fail_with_cdev;
82 rc = sysfs_create_link( 82 rc = sysfs_create_link(
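
The tape_class fix above exists because the call that creates tcd->class_device returns either a valid pointer or an ERR_PTR-encoded error; taking PTR_ERR() of a valid pointer yields a meaningless non-zero value, so the old code could treat a successful registration as a failure. rc must therefore come from PTR_ERR() only when IS_ERR() confirms the pointer encodes an error. A userspace re-implementation of the idiom follows; the kernel's real helpers live in linux/err.h, and these are simplified stand-ins that assume pointers and long have the same width:

#include <stdio.h>

#define MAX_ERRNO 4095

static void *ERR_PTR(long error)      { return (void *)error; }
static long  PTR_ERR(const void *ptr) { return (long)ptr; }
static int   IS_ERR(const void *ptr)
{
	/* Errors are encoded as the top MAX_ERRNO addresses. */
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
	int x = 42;
	void *ok  = &x;                  /* a real object        */
	void *bad = ERR_PTR(-12);        /* -ENOMEM as a pointer */

	/* The fixed pattern from tape_class.c: */
	long rc_ok  = IS_ERR(ok)  ? PTR_ERR(ok)  : 0;
	long rc_bad = IS_ERR(bad) ? PTR_ERR(bad) : 0;

	printf("rc_ok=%ld rc_bad=%ld\n", rc_ok, rc_bad);
	return 0;
}
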
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index 7a39e0b0386..6d91c2eb205 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -772,6 +772,7 @@ ccw_device_online_verify(struct ccw_device *cdev, enum dev_event dev_event)
772 stsch(sch->schid, &sch->schib); 772 stsch(sch->schid, &sch->schib);
773 773
774 if (sch->schib.scsw.actl != 0 || 774 if (sch->schib.scsw.actl != 0 ||
775 (sch->schib.scsw.stctl & SCSW_STCTL_STATUS_PEND) ||
775 (cdev->private->irb.scsw.stctl & SCSW_STCTL_STATUS_PEND)) { 776 (cdev->private->irb.scsw.stctl & SCSW_STCTL_STATUS_PEND)) {
776 /* 777 /*
777 * No final status yet or final status not yet delivered 778 * No final status yet or final status not yet delivered
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index a60124264be..9e3de0bd59b 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -263,6 +263,9 @@ ccw_device_wake_up(struct ccw_device *cdev, unsigned long ip, struct irb *irb)
263 /* Abuse intparm for error reporting. */ 263 /* Abuse intparm for error reporting. */
264 if (IS_ERR(irb)) 264 if (IS_ERR(irb))
265 cdev->private->intparm = -EIO; 265 cdev->private->intparm = -EIO;
266 else if (irb->scsw.cc == 1)
267 /* Retry for deferred condition code. */
268 cdev->private->intparm = -EAGAIN;
266 else if ((irb->scsw.dstat != 269 else if ((irb->scsw.dstat !=
267 (DEV_STAT_CHN_END|DEV_STAT_DEV_END)) || 270 (DEV_STAT_CHN_END|DEV_STAT_DEV_END)) ||
268 (irb->scsw.cstat != 0)) { 271 (irb->scsw.cstat != 0)) {
diff --git a/drivers/s390/net/qeth_main.c b/drivers/s390/net/qeth_main.c
index 5fff1f93973..e1327b8fce0 100644
--- a/drivers/s390/net/qeth_main.c
+++ b/drivers/s390/net/qeth_main.c
@@ -8510,9 +8510,9 @@ static int
8510qeth_ipv6_init(void) 8510qeth_ipv6_init(void)
8511{ 8511{
8512 qeth_old_arp_constructor = arp_tbl.constructor; 8512 qeth_old_arp_constructor = arp_tbl.constructor;
8513 write_lock(&arp_tbl.lock); 8513 write_lock_bh(&arp_tbl.lock);
8514 arp_tbl.constructor = qeth_arp_constructor; 8514 arp_tbl.constructor = qeth_arp_constructor;
8515 write_unlock(&arp_tbl.lock); 8515 write_unlock_bh(&arp_tbl.lock);
8516 8516
8517 arp_direct_ops = (struct neigh_ops*) 8517 arp_direct_ops = (struct neigh_ops*)
8518 kmalloc(sizeof(struct neigh_ops), GFP_KERNEL); 8518 kmalloc(sizeof(struct neigh_ops), GFP_KERNEL);
@@ -8528,9 +8528,9 @@ qeth_ipv6_init(void)
8528static void 8528static void
8529qeth_ipv6_uninit(void) 8529qeth_ipv6_uninit(void)
8530{ 8530{
8531 write_lock(&arp_tbl.lock); 8531 write_lock_bh(&arp_tbl.lock);
8532 arp_tbl.constructor = qeth_old_arp_constructor; 8532 arp_tbl.constructor = qeth_old_arp_constructor;
8533 write_unlock(&arp_tbl.lock); 8533 write_unlock_bh(&arp_tbl.lock);
8534 kfree(arp_direct_ops); 8534 kfree(arp_direct_ops);
8535} 8535}
8536#endif /* CONFIG_QETH_IPV6 */ 8536#endif /* CONFIG_QETH_IPV6 */
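
[Editorial sketch] The qeth change above moves to the _bh lock variants because the neighbour table lock is also taken from softirq (bottom-half) context; if a softirq on the same CPU tries to grab the lock while the plain write_lock() is held, it spins forever. A minimal sketch of the pattern, with a hypothetical my_tbl standing in for a table like arp_tbl:

	/* Sketch: protect data that is also touched from softirq context. */
	#include <linux/spinlock.h>

	struct my_table {
		rwlock_t lock;
		void (*constructor)(void *);
	};

	static struct my_table my_tbl;

	static void my_tbl_init(void)
	{
		rwlock_init(&my_tbl.lock);
	}

	static void install_constructor(void (*ctor)(void *))
	{
		/* _bh variant: block local softirqs so a softirq-context
		 * user of my_tbl.lock cannot deadlock against us. */
		write_lock_bh(&my_tbl.lock);
		my_tbl.constructor = ctor;
		write_unlock_bh(&my_tbl.lock);
	}
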
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index 9cd789b8acd..adc9d8f2c28 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -112,6 +112,105 @@ _zfcp_hex_dump(char *addr, int count)
112 printk("\n"); 112 printk("\n");
113} 113}
114 114
115
116/****************************************************************/
117/****** Functions to handle the request ID hash table ********/
118/****************************************************************/
119
120#define ZFCP_LOG_AREA ZFCP_LOG_AREA_FSF
121
122static int zfcp_reqlist_init(struct zfcp_adapter *adapter)
123{
124 int i;
125
126 adapter->req_list = kcalloc(REQUEST_LIST_SIZE, sizeof(struct list_head),
127 GFP_KERNEL);
128
129 if (!adapter->req_list)
130 return -ENOMEM;
131
132 for (i=0; i<REQUEST_LIST_SIZE; i++)
133 INIT_LIST_HEAD(&adapter->req_list[i]);
134
135 return 0;
136}
137
138static void zfcp_reqlist_free(struct zfcp_adapter *adapter)
139{
140 struct zfcp_fsf_req *request, *tmp;
141 unsigned int i;
142
143 for (i=0; i<REQUEST_LIST_SIZE; i++) {
144 if (list_empty(&adapter->req_list[i]))
145 continue;
146
147 list_for_each_entry_safe(request, tmp,
148 &adapter->req_list[i], list)
149 list_del(&request->list);
150 }
151
152 kfree(adapter->req_list);
153}
154
155void zfcp_reqlist_add(struct zfcp_adapter *adapter,
156 struct zfcp_fsf_req *fsf_req)
157{
158 unsigned int i;
159
160 i = fsf_req->req_id % REQUEST_LIST_SIZE;
161 list_add_tail(&fsf_req->list, &adapter->req_list[i]);
162}
163
164void zfcp_reqlist_remove(struct zfcp_adapter *adapter, unsigned long req_id)
165{
166 struct zfcp_fsf_req *request, *tmp;
167 unsigned int i, counter;
168 u64 dbg_tmp[2];
169
170 i = req_id % REQUEST_LIST_SIZE;
171 BUG_ON(list_empty(&adapter->req_list[i]));
172
173 counter = 0;
174 list_for_each_entry_safe(request, tmp, &adapter->req_list[i], list) {
175 if (request->req_id == req_id) {
176 dbg_tmp[0] = (u64) atomic_read(&adapter->reqs_active);
177 dbg_tmp[1] = (u64) counter;
178 debug_event(adapter->erp_dbf, 4, (void *) dbg_tmp, 16);
179 list_del(&request->list);
180 break;
181 }
182 counter++;
183 }
184}
185
186struct zfcp_fsf_req *zfcp_reqlist_ismember(struct zfcp_adapter *adapter,
187 unsigned long req_id)
188{
189 struct zfcp_fsf_req *request, *tmp;
190 unsigned int i;
191
192 i = req_id % REQUEST_LIST_SIZE;
193
194 list_for_each_entry_safe(request, tmp, &adapter->req_list[i], list)
195 if (request->req_id == req_id)
196 return request;
197
198 return NULL;
199}
200
201int zfcp_reqlist_isempty(struct zfcp_adapter *adapter)
202{
203 unsigned int i;
204
205 for (i=0; i<REQUEST_LIST_SIZE; i++)
206 if (!list_empty(&adapter->req_list[i]))
207 return 0;
208
209 return 1;
210}
211
212#undef ZFCP_LOG_AREA
213
115/****************************************************************/ 214/****************************************************************/
116/************** Uncategorised Functions *************************/ 215/************** Uncategorised Functions *************************/
117/****************************************************************/ 216/****************************************************************/
@@ -961,8 +1060,12 @@ zfcp_adapter_enqueue(struct ccw_device *ccw_device)
961 INIT_LIST_HEAD(&adapter->port_remove_lh); 1060 INIT_LIST_HEAD(&adapter->port_remove_lh);
962 1061
963 /* initialize list of fsf requests */ 1062 /* initialize list of fsf requests */
964 spin_lock_init(&adapter->fsf_req_list_lock); 1063 spin_lock_init(&adapter->req_list_lock);
965 INIT_LIST_HEAD(&adapter->fsf_req_list_head); 1064 retval = zfcp_reqlist_init(adapter);
1065 if (retval) {
1066 ZFCP_LOG_INFO("request list initialization failed\n");
1067 goto failed_low_mem_buffers;
1068 }
966 1069
967 /* initialize debug locks */ 1070 /* initialize debug locks */
968 1071
@@ -1041,8 +1144,6 @@ zfcp_adapter_enqueue(struct ccw_device *ccw_device)
1041 * !0 - struct zfcp_adapter data structure could not be removed 1144 * !0 - struct zfcp_adapter data structure could not be removed
1042 * (e.g. still used) 1145 * (e.g. still used)
1043 * locks: adapter list write lock is assumed to be held by caller 1146 * locks: adapter list write lock is assumed to be held by caller
1044 * adapter->fsf_req_list_lock is taken and released within this
1045 * function and must not be held on entry
1046 */ 1147 */
1047void 1148void
1048zfcp_adapter_dequeue(struct zfcp_adapter *adapter) 1149zfcp_adapter_dequeue(struct zfcp_adapter *adapter)
@@ -1054,14 +1155,14 @@ zfcp_adapter_dequeue(struct zfcp_adapter *adapter)
1054 zfcp_sysfs_adapter_remove_files(&adapter->ccw_device->dev); 1155 zfcp_sysfs_adapter_remove_files(&adapter->ccw_device->dev);
1055 dev_set_drvdata(&adapter->ccw_device->dev, NULL); 1156 dev_set_drvdata(&adapter->ccw_device->dev, NULL);
1056 /* sanity check: no pending FSF requests */ 1157 /* sanity check: no pending FSF requests */
1057 spin_lock_irqsave(&adapter->fsf_req_list_lock, flags); 1158 spin_lock_irqsave(&adapter->req_list_lock, flags);
1058 retval = !list_empty(&adapter->fsf_req_list_head); 1159 retval = zfcp_reqlist_isempty(adapter);
1059 spin_unlock_irqrestore(&adapter->fsf_req_list_lock, flags); 1160 spin_unlock_irqrestore(&adapter->req_list_lock, flags);
1060 if (retval) { 1161 if (!retval) {
1061 ZFCP_LOG_NORMAL("bug: adapter %s (%p) still in use, " 1162 ZFCP_LOG_NORMAL("bug: adapter %s (%p) still in use, "
1062 "%i requests outstanding\n", 1163 "%i requests outstanding\n",
1063 zfcp_get_busid_by_adapter(adapter), adapter, 1164 zfcp_get_busid_by_adapter(adapter), adapter,
1064 atomic_read(&adapter->fsf_reqs_active)); 1165 atomic_read(&adapter->reqs_active));
1065 retval = -EBUSY; 1166 retval = -EBUSY;
1066 goto out; 1167 goto out;
1067 } 1168 }
@@ -1087,6 +1188,7 @@ zfcp_adapter_dequeue(struct zfcp_adapter *adapter)
1087 zfcp_free_low_mem_buffers(adapter); 1188 zfcp_free_low_mem_buffers(adapter);
1088 /* free memory of adapter data structure and queues */ 1189 /* free memory of adapter data structure and queues */
1089 zfcp_qdio_free_queues(adapter); 1190 zfcp_qdio_free_queues(adapter);
1191 zfcp_reqlist_free(adapter);
1090 kfree(adapter->fc_stats); 1192 kfree(adapter->fc_stats);
1091 kfree(adapter->stats_reset_data); 1193 kfree(adapter->stats_reset_data);
1092 ZFCP_LOG_TRACE("freeing adapter structure\n"); 1194 ZFCP_LOG_TRACE("freeing adapter structure\n");
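
[Editorial sketch] The new zfcp request list above replaces a single list of raw request pointers with a small hash table: every pending request carries a numeric req_id, and req_id modulo REQUEST_LIST_SIZE selects the bucket that is searched on completion, so an inbound response can no longer be matched against a stale or bogus pointer. A stand-alone sketch of the same bucket scheme (illustrative names, not the driver's structures):

	/* Userspace sketch of the request-ID hash: an array of buckets
	 * indexed by id % NBUCKETS, each a singly linked list. */
	#include <stddef.h>

	#define NBUCKETS 128

	struct req {
		unsigned long id;
		struct req *next;
	};

	static struct req *buckets[NBUCKETS];

	static void req_add(struct req *r)
	{
		unsigned int i = r->id % NBUCKETS;

		r->next = buckets[i];
		buckets[i] = r;
	}

	static struct req *req_find(unsigned long id)
	{
		struct req *r;

		for (r = buckets[id % NBUCKETS]; r; r = r->next)
			if (r->id == id)
				return r;
		return NULL;	/* unknown ID: caller treats as error */
	}
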
diff --git a/drivers/s390/scsi/zfcp_ccw.c b/drivers/s390/scsi/zfcp_ccw.c
index 57d8e4bfb8d..fdabadeaa9e 100644
--- a/drivers/s390/scsi/zfcp_ccw.c
+++ b/drivers/s390/scsi/zfcp_ccw.c
@@ -164,6 +164,11 @@ zfcp_ccw_set_online(struct ccw_device *ccw_device)
164 retval = zfcp_adapter_scsi_register(adapter); 164 retval = zfcp_adapter_scsi_register(adapter);
165 if (retval) 165 if (retval)
166 goto out_scsi_register; 166 goto out_scsi_register;
167
168 /* initialize request counter */
169 BUG_ON(!zfcp_reqlist_isempty(adapter));
170 adapter->req_no = 0;
171
167 zfcp_erp_modify_adapter_status(adapter, ZFCP_STATUS_COMMON_RUNNING, 172 zfcp_erp_modify_adapter_status(adapter, ZFCP_STATUS_COMMON_RUNNING,
168 ZFCP_SET); 173 ZFCP_SET);
169 zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED); 174 zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED);
diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h
index 2df512a18e2..94d1b74db35 100644
--- a/drivers/s390/scsi/zfcp_def.h
+++ b/drivers/s390/scsi/zfcp_def.h
@@ -52,7 +52,7 @@
52/********************* GENERAL DEFINES *********************************/ 52/********************* GENERAL DEFINES *********************************/
53 53
54/* zfcp version number, it consists of major, minor, and patch-level number */ 54/* zfcp version number, it consists of major, minor, and patch-level number */
55#define ZFCP_VERSION "4.7.0" 55#define ZFCP_VERSION "4.8.0"
56 56
57/** 57/**
58 * zfcp_sg_to_address - determine kernel address from struct scatterlist 58 * zfcp_sg_to_address - determine kernel address from struct scatterlist
@@ -80,7 +80,7 @@ zfcp_address_to_sg(void *address, struct scatterlist *list)
80#define REQUEST_LIST_SIZE 128 80#define REQUEST_LIST_SIZE 128
81 81
82/********************* SCSI SPECIFIC DEFINES *********************************/ 82/********************* SCSI SPECIFIC DEFINES *********************************/
83#define ZFCP_SCSI_ER_TIMEOUT (100*HZ) 83#define ZFCP_SCSI_ER_TIMEOUT (10*HZ)
84 84
85/********************* CIO/QDIO SPECIFIC DEFINES *****************************/ 85/********************* CIO/QDIO SPECIFIC DEFINES *****************************/
86 86
@@ -886,11 +886,11 @@ struct zfcp_adapter {
886 struct list_head port_remove_lh; /* head of ports to be 886 struct list_head port_remove_lh; /* head of ports to be
887 removed */ 887 removed */
888 u32 ports; /* number of remote ports */ 888 u32 ports; /* number of remote ports */
889 struct timer_list scsi_er_timer; /* SCSI err recovery watch */ 889 struct timer_list scsi_er_timer; /* SCSI err recovery watch */
890 struct list_head fsf_req_list_head; /* head of FSF req list */ 890 atomic_t reqs_active; /* # active FSF reqs */
891 spinlock_t fsf_req_list_lock; /* lock for ops on list of 891 unsigned long req_no; /* unique FSF req number */
892 FSF requests */ 892 struct list_head *req_list; /* list of pending reqs */
893 atomic_t fsf_reqs_active; /* # active FSF reqs */ 893 spinlock_t req_list_lock; /* request list lock */
894 struct zfcp_qdio_queue request_queue; /* request queue */ 894 struct zfcp_qdio_queue request_queue; /* request queue */
895 u32 fsf_req_seq_no; /* FSF cmnd seq number */ 895 u32 fsf_req_seq_no; /* FSF cmnd seq number */
896 wait_queue_head_t request_wq; /* can be used to wait for 896 wait_queue_head_t request_wq; /* can be used to wait for
@@ -986,6 +986,7 @@ struct zfcp_unit {
986/* FSF request */ 986/* FSF request */
987struct zfcp_fsf_req { 987struct zfcp_fsf_req {
988 struct list_head list; /* list of FSF requests */ 988 struct list_head list; /* list of FSF requests */
989 unsigned long req_id; /* unique request ID */
989 struct zfcp_adapter *adapter; /* adapter request belongs to */ 990 struct zfcp_adapter *adapter; /* adapter request belongs to */
990 u8 sbal_number; /* nr of SBALs free for use */ 991 u8 sbal_number; /* nr of SBALs free for use */
991 u8 sbal_first; /* first SBAL for this request */ 992 u8 sbal_first; /* first SBAL for this request */
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index 8ec8da0beaa..7f60b6fdf72 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -64,8 +64,8 @@ static int zfcp_erp_strategy_check_action(struct zfcp_erp_action *, int);
64static int zfcp_erp_adapter_strategy(struct zfcp_erp_action *); 64static int zfcp_erp_adapter_strategy(struct zfcp_erp_action *);
65static int zfcp_erp_adapter_strategy_generic(struct zfcp_erp_action *, int); 65static int zfcp_erp_adapter_strategy_generic(struct zfcp_erp_action *, int);
66static int zfcp_erp_adapter_strategy_close(struct zfcp_erp_action *); 66static int zfcp_erp_adapter_strategy_close(struct zfcp_erp_action *);
67static int zfcp_erp_adapter_strategy_close_qdio(struct zfcp_erp_action *); 67static void zfcp_erp_adapter_strategy_close_qdio(struct zfcp_erp_action *);
68static int zfcp_erp_adapter_strategy_close_fsf(struct zfcp_erp_action *); 68static void zfcp_erp_adapter_strategy_close_fsf(struct zfcp_erp_action *);
69static int zfcp_erp_adapter_strategy_open(struct zfcp_erp_action *); 69static int zfcp_erp_adapter_strategy_open(struct zfcp_erp_action *);
70static int zfcp_erp_adapter_strategy_open_qdio(struct zfcp_erp_action *); 70static int zfcp_erp_adapter_strategy_open_qdio(struct zfcp_erp_action *);
71static int zfcp_erp_adapter_strategy_open_fsf(struct zfcp_erp_action *); 71static int zfcp_erp_adapter_strategy_open_fsf(struct zfcp_erp_action *);
@@ -93,10 +93,9 @@ static int zfcp_erp_unit_strategy_clearstati(struct zfcp_unit *);
93static int zfcp_erp_unit_strategy_close(struct zfcp_erp_action *); 93static int zfcp_erp_unit_strategy_close(struct zfcp_erp_action *);
94static int zfcp_erp_unit_strategy_open(struct zfcp_erp_action *); 94static int zfcp_erp_unit_strategy_open(struct zfcp_erp_action *);
95 95
96static int zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *); 96static void zfcp_erp_action_dismiss_port(struct zfcp_port *);
97static int zfcp_erp_action_dismiss_port(struct zfcp_port *); 97static void zfcp_erp_action_dismiss_unit(struct zfcp_unit *);
98static int zfcp_erp_action_dismiss_unit(struct zfcp_unit *); 98static void zfcp_erp_action_dismiss(struct zfcp_erp_action *);
99static int zfcp_erp_action_dismiss(struct zfcp_erp_action *);
100 99
101static int zfcp_erp_action_enqueue(int, struct zfcp_adapter *, 100static int zfcp_erp_action_enqueue(int, struct zfcp_adapter *,
102 struct zfcp_port *, struct zfcp_unit *); 101 struct zfcp_port *, struct zfcp_unit *);
@@ -135,29 +134,39 @@ zfcp_fsf_request_timeout_handler(unsigned long data)
135 zfcp_erp_adapter_reopen(adapter, 0); 134 zfcp_erp_adapter_reopen(adapter, 0);
136} 135}
137 136
138/* 137/**
139 * function: zfcp_fsf_scsi_er_timeout_handler 138 * zfcp_fsf_scsi_er_timeout_handler - timeout handler for scsi eh tasks
140 * 139 *
141 * purpose: This function needs to be called whenever a SCSI error recovery 140 * This function needs to be called whenever a SCSI error recovery
142 * action (abort/reset) does not return. 141 * action (abort/reset) does not return. Re-opening the adapter means
143 * Re-opening the adapter means that the command can be returned 142 * that the abort/reset command can be returned by zfcp. It won't complete
144 * by zfcp (it is guarranteed that it does not return via the 143 * via the adapter anymore (because qdio queues are closed). If ERP is
145 * adapter anymore). The buffer can then be used again. 144 * already running on this adapter it will be stopped.
146 *
147 * returns: sod all
148 */ 145 */
149void 146void zfcp_fsf_scsi_er_timeout_handler(unsigned long data)
150zfcp_fsf_scsi_er_timeout_handler(unsigned long data)
151{ 147{
152 struct zfcp_adapter *adapter = (struct zfcp_adapter *) data; 148 struct zfcp_adapter *adapter = (struct zfcp_adapter *) data;
149 unsigned long flags;
153 150
154 ZFCP_LOG_NORMAL("warning: SCSI error recovery timed out. " 151 ZFCP_LOG_NORMAL("warning: SCSI error recovery timed out. "
155 "Restarting all operations on the adapter %s\n", 152 "Restarting all operations on the adapter %s\n",
156 zfcp_get_busid_by_adapter(adapter)); 153 zfcp_get_busid_by_adapter(adapter));
157 debug_text_event(adapter->erp_dbf, 1, "eh_lmem_tout"); 154 debug_text_event(adapter->erp_dbf, 1, "eh_lmem_tout");
158 zfcp_erp_adapter_reopen(adapter, 0);
159 155
160 return; 156 write_lock_irqsave(&adapter->erp_lock, flags);
157 if (atomic_test_mask(ZFCP_STATUS_ADAPTER_ERP_PENDING,
158 &adapter->status)) {
159 zfcp_erp_modify_adapter_status(adapter,
160 ZFCP_STATUS_COMMON_UNBLOCKED|ZFCP_STATUS_COMMON_OPEN,
161 ZFCP_CLEAR);
162 zfcp_erp_action_dismiss_adapter(adapter);
163 write_unlock_irqrestore(&adapter->erp_lock, flags);
164 /* dismiss all pending requests including requests for ERP */
165 zfcp_fsf_req_dismiss_all(adapter);
166 adapter->fsf_req_seq_no = 0;
167 } else
168 write_unlock_irqrestore(&adapter->erp_lock, flags);
169 zfcp_erp_adapter_reopen(adapter, 0);
161} 170}
162 171
163/* 172/*
@@ -670,17 +679,10 @@ zfcp_erp_unit_reopen(struct zfcp_unit *unit, int clear_mask)
670 return retval; 679 return retval;
671} 680}
672 681
673/* 682/**
674 * function: 683 * zfcp_erp_adapter_block - mark adapter as blocked, block scsi requests
675 *
676 * purpose: disable I/O,
677 * return any open requests and clean them up,
678 * aim: no pending and incoming I/O
679 *
680 * returns:
681 */ 684 */
682static void 685static void zfcp_erp_adapter_block(struct zfcp_adapter *adapter, int clear_mask)
683zfcp_erp_adapter_block(struct zfcp_adapter *adapter, int clear_mask)
684{ 686{
685 debug_text_event(adapter->erp_dbf, 6, "a_bl"); 687 debug_text_event(adapter->erp_dbf, 6, "a_bl");
686 zfcp_erp_modify_adapter_status(adapter, 688 zfcp_erp_modify_adapter_status(adapter,
@@ -688,15 +690,10 @@ zfcp_erp_adapter_block(struct zfcp_adapter *adapter, int clear_mask)
688 clear_mask, ZFCP_CLEAR); 690 clear_mask, ZFCP_CLEAR);
689} 691}
690 692
691/* 693/**
692 * function: 694 * zfcp_erp_adapter_unblock - mark adapter as unblocked, allow scsi requests
693 *
694 * purpose: enable I/O
695 *
696 * returns:
697 */ 695 */
698static void 696static void zfcp_erp_adapter_unblock(struct zfcp_adapter *adapter)
699zfcp_erp_adapter_unblock(struct zfcp_adapter *adapter)
700{ 697{
701 debug_text_event(adapter->erp_dbf, 6, "a_ubl"); 698 debug_text_event(adapter->erp_dbf, 6, "a_ubl");
702 atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &adapter->status); 699 atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &adapter->status);
@@ -848,18 +845,16 @@ zfcp_erp_strategy_check_fsfreq(struct zfcp_erp_action *erp_action)
848 struct zfcp_adapter *adapter = erp_action->adapter; 845 struct zfcp_adapter *adapter = erp_action->adapter;
849 846
850 if (erp_action->fsf_req) { 847 if (erp_action->fsf_req) {
851 /* take lock to ensure that request is not being deleted meanwhile */ 848 /* take lock to ensure that request is not deleted meanwhile */
852 spin_lock(&adapter->fsf_req_list_lock); 849 spin_lock(&adapter->req_list_lock);
853 /* check whether fsf req does still exist */ 850 if ((!zfcp_reqlist_ismember(adapter,
854 list_for_each_entry(fsf_req, &adapter->fsf_req_list_head, list) 851 erp_action->fsf_req->req_id)) &&
855 if (fsf_req == erp_action->fsf_req) 852 (fsf_req->erp_action == erp_action)) {
856 break;
857 if (fsf_req && (fsf_req->erp_action == erp_action)) {
858 /* fsf_req still exists */ 853 /* fsf_req still exists */
859 debug_text_event(adapter->erp_dbf, 3, "a_ca_req"); 854 debug_text_event(adapter->erp_dbf, 3, "a_ca_req");
860 debug_event(adapter->erp_dbf, 3, &fsf_req, 855 debug_event(adapter->erp_dbf, 3, &fsf_req,
861 sizeof (unsigned long)); 856 sizeof (unsigned long));
862 /* dismiss fsf_req of timed out or dismissed erp_action */ 857 /* dismiss fsf_req of timed out/dismissed erp_action */
863 if (erp_action->status & (ZFCP_STATUS_ERP_DISMISSED | 858 if (erp_action->status & (ZFCP_STATUS_ERP_DISMISSED |
864 ZFCP_STATUS_ERP_TIMEDOUT)) { 859 ZFCP_STATUS_ERP_TIMEDOUT)) {
865 debug_text_event(adapter->erp_dbf, 3, 860 debug_text_event(adapter->erp_dbf, 3,
@@ -892,30 +887,22 @@ zfcp_erp_strategy_check_fsfreq(struct zfcp_erp_action *erp_action)
892 */ 887 */
893 erp_action->fsf_req = NULL; 888 erp_action->fsf_req = NULL;
894 } 889 }
895 spin_unlock(&adapter->fsf_req_list_lock); 890 spin_unlock(&adapter->req_list_lock);
896 } else 891 } else
897 debug_text_event(adapter->erp_dbf, 3, "a_ca_noreq"); 892 debug_text_event(adapter->erp_dbf, 3, "a_ca_noreq");
898 893
899 return retval; 894 return retval;
900} 895}
901 896
902/* 897/**
903 * purpose: generic handler for asynchronous events related to erp_action events 898 * zfcp_erp_async_handler_nolock - complete erp_action
904 * (normal completion, time-out, dismissing, retry after
905 * low memory condition)
906 *
907 * note: deletion of timer is not required (e.g. in case of a time-out),
908 * but a second try does no harm,
909 * we leave it in here to allow for greater simplification
910 * 899 *
911 * returns: 0 - there was an action to handle 900 * Used for normal completion, time-out, dismissal and failure after
912 * !0 - otherwise 901 * low memory condition.
913 */ 902 */
914static int 903static void zfcp_erp_async_handler_nolock(struct zfcp_erp_action *erp_action,
915zfcp_erp_async_handler_nolock(struct zfcp_erp_action *erp_action, 904 unsigned long set_mask)
916 unsigned long set_mask)
917{ 905{
918 int retval;
919 struct zfcp_adapter *adapter = erp_action->adapter; 906 struct zfcp_adapter *adapter = erp_action->adapter;
920 907
921 if (zfcp_erp_action_exists(erp_action) == ZFCP_ERP_ACTION_RUNNING) { 908 if (zfcp_erp_action_exists(erp_action) == ZFCP_ERP_ACTION_RUNNING) {
@@ -926,43 +913,26 @@ zfcp_erp_async_handler_nolock(struct zfcp_erp_action *erp_action,
926 del_timer(&erp_action->timer); 913 del_timer(&erp_action->timer);
927 erp_action->status |= set_mask; 914 erp_action->status |= set_mask;
928 zfcp_erp_action_ready(erp_action); 915 zfcp_erp_action_ready(erp_action);
929 retval = 0;
930 } else { 916 } else {
931 /* action is ready or gone - nothing to do */ 917 /* action is ready or gone - nothing to do */
932 debug_text_event(adapter->erp_dbf, 3, "a_asyh_gone"); 918 debug_text_event(adapter->erp_dbf, 3, "a_asyh_gone");
933 debug_event(adapter->erp_dbf, 3, &erp_action->action, 919 debug_event(adapter->erp_dbf, 3, &erp_action->action,
934 sizeof (int)); 920 sizeof (int));
935 retval = 1;
936 } 921 }
937
938 return retval;
939} 922}
940 923
941/* 924/**
942 * purpose: generic handler for asynchronous events related to erp_action 925 * zfcp_erp_async_handler - wrapper for erp_async_handler_nolock w/ locking
943 * events (normal completion, time-out, dismissing, retry after
944 * low memory condition)
945 *
946 * note: deletion of timer is not required (e.g. in case of a time-out),
947 * but a second try does no harm,
948 * we leave it in here to allow for greater simplification
949 *
950 * returns: 0 - there was an action to handle
951 * !0 - otherwise
952 */ 926 */
953int 927void zfcp_erp_async_handler(struct zfcp_erp_action *erp_action,
954zfcp_erp_async_handler(struct zfcp_erp_action *erp_action, 928 unsigned long set_mask)
955 unsigned long set_mask)
956{ 929{
957 struct zfcp_adapter *adapter = erp_action->adapter; 930 struct zfcp_adapter *adapter = erp_action->adapter;
958 unsigned long flags; 931 unsigned long flags;
959 int retval;
960 932
961 write_lock_irqsave(&adapter->erp_lock, flags); 933 write_lock_irqsave(&adapter->erp_lock, flags);
962 retval = zfcp_erp_async_handler_nolock(erp_action, set_mask); 934 zfcp_erp_async_handler_nolock(erp_action, set_mask);
963 write_unlock_irqrestore(&adapter->erp_lock, flags); 935 write_unlock_irqrestore(&adapter->erp_lock, flags);
964
965 return retval;
966} 936}
967 937
968/* 938/*
@@ -999,17 +969,15 @@ zfcp_erp_timeout_handler(unsigned long data)
999 zfcp_erp_async_handler(erp_action, ZFCP_STATUS_ERP_TIMEDOUT); 969 zfcp_erp_async_handler(erp_action, ZFCP_STATUS_ERP_TIMEDOUT);
1000} 970}
1001 971
1002/* 972/**
1003 * purpose: is called for an erp_action which needs to be ended 973 * zfcp_erp_action_dismiss - dismiss an erp_action
1004 * though not being done,
1005 * this is usually required if an higher is generated,
1006 * action gets an appropriate flag and will be processed
1007 * accordingly
1008 * 974 *
1009 * locks: erp_lock held (thus we need to call another handler variant) 975 * adapter->erp_lock must be held
976 *
977 * Dismissal of an erp_action is usually required if an erp_action of
978 * higher priority is generated.
1010 */ 979 */
1011static int 980static void zfcp_erp_action_dismiss(struct zfcp_erp_action *erp_action)
1012zfcp_erp_action_dismiss(struct zfcp_erp_action *erp_action)
1013{ 981{
1014 struct zfcp_adapter *adapter = erp_action->adapter; 982 struct zfcp_adapter *adapter = erp_action->adapter;
1015 983
@@ -1017,8 +985,6 @@ zfcp_erp_action_dismiss(struct zfcp_erp_action *erp_action)
1017 debug_event(adapter->erp_dbf, 2, &erp_action->action, sizeof (int)); 985 debug_event(adapter->erp_dbf, 2, &erp_action->action, sizeof (int));
1018 986
1019 zfcp_erp_async_handler_nolock(erp_action, ZFCP_STATUS_ERP_DISMISSED); 987 zfcp_erp_async_handler_nolock(erp_action, ZFCP_STATUS_ERP_DISMISSED);
1020
1021 return 0;
1022} 988}
1023 989
1024int 990int
@@ -2074,18 +2040,12 @@ zfcp_erp_adapter_strategy_open_qdio(struct zfcp_erp_action *erp_action)
2074 return retval; 2040 return retval;
2075} 2041}
2076 2042
2077/* 2043/**
2078 * function: zfcp_qdio_cleanup 2044 * zfcp_erp_adapter_strategy_close_qdio - close qdio queues for an adapter
2079 *
2080 * purpose: cleans up QDIO operation for the specified adapter
2081 *
2082 * returns: 0 - successful cleanup
2083 * !0 - failed cleanup
2084 */ 2045 */
2085int 2046static void
2086zfcp_erp_adapter_strategy_close_qdio(struct zfcp_erp_action *erp_action) 2047zfcp_erp_adapter_strategy_close_qdio(struct zfcp_erp_action *erp_action)
2087{ 2048{
2088 int retval = ZFCP_ERP_SUCCEEDED;
2089 int first_used; 2049 int first_used;
2090 int used_count; 2050 int used_count;
2091 struct zfcp_adapter *adapter = erp_action->adapter; 2051 struct zfcp_adapter *adapter = erp_action->adapter;
@@ -2094,15 +2054,13 @@ zfcp_erp_adapter_strategy_close_qdio(struct zfcp_erp_action *erp_action)
2094 ZFCP_LOG_DEBUG("error: attempt to shut down inactive QDIO " 2054 ZFCP_LOG_DEBUG("error: attempt to shut down inactive QDIO "
2095 "queues on adapter %s\n", 2055 "queues on adapter %s\n",
2096 zfcp_get_busid_by_adapter(adapter)); 2056 zfcp_get_busid_by_adapter(adapter));
2097 retval = ZFCP_ERP_FAILED; 2057 return;
2098 goto out;
2099 } 2058 }
2100 2059
2101 /* 2060 /*
2102 * Get queue_lock and clear QDIOUP flag. Thus it's guaranteed that 2061 * Get queue_lock and clear QDIOUP flag. Thus it's guaranteed that
2103 * do_QDIO won't be called while qdio_shutdown is in progress. 2062 * do_QDIO won't be called while qdio_shutdown is in progress.
2104 */ 2063 */
2105
2106 write_lock_irq(&adapter->request_queue.queue_lock); 2064 write_lock_irq(&adapter->request_queue.queue_lock);
2107 atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status); 2065 atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status);
2108 write_unlock_irq(&adapter->request_queue.queue_lock); 2066 write_unlock_irq(&adapter->request_queue.queue_lock);
@@ -2134,8 +2092,6 @@ zfcp_erp_adapter_strategy_close_qdio(struct zfcp_erp_action *erp_action)
2134 adapter->request_queue.free_index = 0; 2092 adapter->request_queue.free_index = 0;
2135 atomic_set(&adapter->request_queue.free_count, 0); 2093 atomic_set(&adapter->request_queue.free_count, 0);
2136 adapter->request_queue.distance_from_int = 0; 2094 adapter->request_queue.distance_from_int = 0;
2137 out:
2138 return retval;
2139} 2095}
2140 2096
2141static int 2097static int
@@ -2258,11 +2214,11 @@ zfcp_erp_adapter_strategy_open_fsf_xport(struct zfcp_erp_action *erp_action)
2258 "%s)\n", zfcp_get_busid_by_adapter(adapter)); 2214 "%s)\n", zfcp_get_busid_by_adapter(adapter));
2259 ret = ZFCP_ERP_FAILED; 2215 ret = ZFCP_ERP_FAILED;
2260 } 2216 }
2261 if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status)) { 2217
2262 ZFCP_LOG_INFO("error: exchange port data failed (adapter " 2218 /* don't treat as error for the sake of compatibility */
2219 if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status))
2220 ZFCP_LOG_INFO("warning: exchange port data failed (adapter "
2263 "%s\n", zfcp_get_busid_by_adapter(adapter)); 2221 "%s\n", zfcp_get_busid_by_adapter(adapter));
2264 ret = ZFCP_ERP_FAILED;
2265 }
2266 2222
2267 return ret; 2223 return ret;
2268} 2224}
@@ -2292,18 +2248,12 @@ zfcp_erp_adapter_strategy_open_fsf_statusread(struct zfcp_erp_action
2292 return retval; 2248 return retval;
2293} 2249}
2294 2250
2295/* 2251/**
2296 * function: zfcp_fsf_cleanup 2252 * zfcp_erp_adapter_strategy_close_fsf - stop FSF operations for an adapter
2297 *
2298 * purpose: cleanup FSF operation for specified adapter
2299 *
2300 * returns: 0 - FSF operation successfully cleaned up
2301 * !0 - failed to cleanup FSF operation for this adapter
2302 */ 2253 */
2303static int 2254static void
2304zfcp_erp_adapter_strategy_close_fsf(struct zfcp_erp_action *erp_action) 2255zfcp_erp_adapter_strategy_close_fsf(struct zfcp_erp_action *erp_action)
2305{ 2256{
2306 int retval = ZFCP_ERP_SUCCEEDED;
2307 struct zfcp_adapter *adapter = erp_action->adapter; 2257 struct zfcp_adapter *adapter = erp_action->adapter;
2308 2258
2309 /* 2259 /*
@@ -2317,8 +2267,6 @@ zfcp_erp_adapter_strategy_close_fsf(struct zfcp_erp_action *erp_action)
2317 /* all ports and units are closed */ 2267 /* all ports and units are closed */
2318 zfcp_erp_modify_adapter_status(adapter, 2268 zfcp_erp_modify_adapter_status(adapter,
2319 ZFCP_STATUS_COMMON_OPEN, ZFCP_CLEAR); 2269 ZFCP_STATUS_COMMON_OPEN, ZFCP_CLEAR);
2320
2321 return retval;
2322} 2270}
2323 2271
2324/* 2272/*
@@ -3293,10 +3241,8 @@ zfcp_erp_action_cleanup(int action, struct zfcp_adapter *adapter,
3293} 3241}
3294 3242
3295 3243
3296static int 3244void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter)
3297zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter)
3298{ 3245{
3299 int retval = 0;
3300 struct zfcp_port *port; 3246 struct zfcp_port *port;
3301 3247
3302 debug_text_event(adapter->erp_dbf, 5, "a_actab"); 3248 debug_text_event(adapter->erp_dbf, 5, "a_actab");
@@ -3305,14 +3251,10 @@ zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter)
3305 else 3251 else
3306 list_for_each_entry(port, &adapter->port_list_head, list) 3252 list_for_each_entry(port, &adapter->port_list_head, list)
3307 zfcp_erp_action_dismiss_port(port); 3253 zfcp_erp_action_dismiss_port(port);
3308
3309 return retval;
3310} 3254}
3311 3255
3312static int 3256static void zfcp_erp_action_dismiss_port(struct zfcp_port *port)
3313zfcp_erp_action_dismiss_port(struct zfcp_port *port)
3314{ 3257{
3315 int retval = 0;
3316 struct zfcp_unit *unit; 3258 struct zfcp_unit *unit;
3317 struct zfcp_adapter *adapter = port->adapter; 3259 struct zfcp_adapter *adapter = port->adapter;
3318 3260
@@ -3323,22 +3265,16 @@ zfcp_erp_action_dismiss_port(struct zfcp_port *port)
3323 else 3265 else
3324 list_for_each_entry(unit, &port->unit_list_head, list) 3266 list_for_each_entry(unit, &port->unit_list_head, list)
3325 zfcp_erp_action_dismiss_unit(unit); 3267 zfcp_erp_action_dismiss_unit(unit);
3326
3327 return retval;
3328} 3268}
3329 3269
3330static int 3270static void zfcp_erp_action_dismiss_unit(struct zfcp_unit *unit)
3331zfcp_erp_action_dismiss_unit(struct zfcp_unit *unit)
3332{ 3271{
3333 int retval = 0;
3334 struct zfcp_adapter *adapter = unit->port->adapter; 3272 struct zfcp_adapter *adapter = unit->port->adapter;
3335 3273
3336 debug_text_event(adapter->erp_dbf, 5, "u_actab"); 3274 debug_text_event(adapter->erp_dbf, 5, "u_actab");
3337 debug_event(adapter->erp_dbf, 5, &unit->fcp_lun, sizeof (fcp_lun_t)); 3275 debug_event(adapter->erp_dbf, 5, &unit->fcp_lun, sizeof (fcp_lun_t));
3338 if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &unit->status)) 3276 if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &unit->status))
3339 zfcp_erp_action_dismiss(&unit->erp_action); 3277 zfcp_erp_action_dismiss(&unit->erp_action);
3340
3341 return retval;
3342} 3278}
3343 3279
3344static inline void 3280static inline void
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index d02366004cd..146d7a2b4c4 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -63,7 +63,6 @@ extern int zfcp_qdio_allocate_queues(struct zfcp_adapter *);
63extern void zfcp_qdio_free_queues(struct zfcp_adapter *); 63extern void zfcp_qdio_free_queues(struct zfcp_adapter *);
64extern int zfcp_qdio_determine_pci(struct zfcp_qdio_queue *, 64extern int zfcp_qdio_determine_pci(struct zfcp_qdio_queue *,
65 struct zfcp_fsf_req *); 65 struct zfcp_fsf_req *);
66extern int zfcp_qdio_reqid_check(struct zfcp_adapter *, void *);
67 66
68extern volatile struct qdio_buffer_element *zfcp_qdio_sbale_req 67extern volatile struct qdio_buffer_element *zfcp_qdio_sbale_req
69 (struct zfcp_fsf_req *, int, int); 68 (struct zfcp_fsf_req *, int, int);
@@ -140,6 +139,7 @@ extern void zfcp_erp_modify_adapter_status(struct zfcp_adapter *, u32, int);
140extern int zfcp_erp_adapter_reopen(struct zfcp_adapter *, int); 139extern int zfcp_erp_adapter_reopen(struct zfcp_adapter *, int);
141extern int zfcp_erp_adapter_shutdown(struct zfcp_adapter *, int); 140extern int zfcp_erp_adapter_shutdown(struct zfcp_adapter *, int);
142extern void zfcp_erp_adapter_failed(struct zfcp_adapter *); 141extern void zfcp_erp_adapter_failed(struct zfcp_adapter *);
142extern void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *);
143 143
144extern void zfcp_erp_modify_port_status(struct zfcp_port *, u32, int); 144extern void zfcp_erp_modify_port_status(struct zfcp_port *, u32, int);
145extern int zfcp_erp_port_reopen(struct zfcp_port *, int); 145extern int zfcp_erp_port_reopen(struct zfcp_port *, int);
@@ -156,7 +156,7 @@ extern void zfcp_erp_unit_failed(struct zfcp_unit *);
156extern int zfcp_erp_thread_setup(struct zfcp_adapter *); 156extern int zfcp_erp_thread_setup(struct zfcp_adapter *);
157extern int zfcp_erp_thread_kill(struct zfcp_adapter *); 157extern int zfcp_erp_thread_kill(struct zfcp_adapter *);
158extern int zfcp_erp_wait(struct zfcp_adapter *); 158extern int zfcp_erp_wait(struct zfcp_adapter *);
159extern int zfcp_erp_async_handler(struct zfcp_erp_action *, unsigned long); 159extern void zfcp_erp_async_handler(struct zfcp_erp_action *, unsigned long);
160 160
161extern int zfcp_test_link(struct zfcp_port *); 161extern int zfcp_test_link(struct zfcp_port *);
162 162
@@ -190,5 +190,10 @@ extern void zfcp_scsi_dbf_event_abort(const char *, struct zfcp_adapter *,
190 struct zfcp_fsf_req *); 190 struct zfcp_fsf_req *);
191extern void zfcp_scsi_dbf_event_devreset(const char *, u8, struct zfcp_unit *, 191extern void zfcp_scsi_dbf_event_devreset(const char *, u8, struct zfcp_unit *,
192 struct scsi_cmnd *); 192 struct scsi_cmnd *);
193extern void zfcp_reqlist_add(struct zfcp_adapter *, struct zfcp_fsf_req *);
194extern void zfcp_reqlist_remove(struct zfcp_adapter *, unsigned long);
195extern struct zfcp_fsf_req *zfcp_reqlist_ismember(struct zfcp_adapter *,
196 unsigned long);
197extern int zfcp_reqlist_isempty(struct zfcp_adapter *);
193 198
194#endif /* ZFCP_EXT_H */ 199#endif /* ZFCP_EXT_H */
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 31db2b06fab..ff2eacf5ec8 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -49,7 +49,6 @@ static int zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *);
49static void zfcp_fsf_link_down_info_eval(struct zfcp_adapter *, 49static void zfcp_fsf_link_down_info_eval(struct zfcp_adapter *,
50 struct fsf_link_down_info *); 50 struct fsf_link_down_info *);
51static int zfcp_fsf_req_dispatch(struct zfcp_fsf_req *); 51static int zfcp_fsf_req_dispatch(struct zfcp_fsf_req *);
52static void zfcp_fsf_req_dismiss(struct zfcp_fsf_req *);
53 52
54/* association between FSF command and FSF QTCB type */ 53/* association between FSF command and FSF QTCB type */
55static u32 fsf_qtcb_type[] = { 54static u32 fsf_qtcb_type[] = {
@@ -146,47 +145,48 @@ zfcp_fsf_req_free(struct zfcp_fsf_req *fsf_req)
146 kfree(fsf_req); 145 kfree(fsf_req);
147} 146}
148 147
149/* 148/**
150 * function: 149 * zfcp_fsf_req_dismiss - dismiss a single fsf request
151 *
152 * purpose:
153 *
154 * returns:
155 *
156 * note: qdio queues shall be down (no ongoing inbound processing)
157 */ 150 */
158int 151static void zfcp_fsf_req_dismiss(struct zfcp_adapter *adapter,
159zfcp_fsf_req_dismiss_all(struct zfcp_adapter *adapter) 152 struct zfcp_fsf_req *fsf_req,
153 unsigned int counter)
160{ 154{
161 struct zfcp_fsf_req *fsf_req, *tmp; 155 u64 dbg_tmp[2];
162 unsigned long flags;
163 LIST_HEAD(remove_queue);
164 156
165 spin_lock_irqsave(&adapter->fsf_req_list_lock, flags); 157 dbg_tmp[0] = (u64) atomic_read(&adapter->reqs_active);
166 list_splice_init(&adapter->fsf_req_list_head, &remove_queue); 158 dbg_tmp[1] = (u64) counter;
167 atomic_set(&adapter->fsf_reqs_active, 0); 159 debug_event(adapter->erp_dbf, 4, (void *) dbg_tmp, 16);
168 spin_unlock_irqrestore(&adapter->fsf_req_list_lock, flags); 160 list_del(&fsf_req->list);
169 161 fsf_req->status |= ZFCP_STATUS_FSFREQ_DISMISSED;
170 list_for_each_entry_safe(fsf_req, tmp, &remove_queue, list) { 162 zfcp_fsf_req_complete(fsf_req);
171 list_del(&fsf_req->list);
172 zfcp_fsf_req_dismiss(fsf_req);
173 }
174
175 return 0;
176} 163}
177 164
178/* 165/**
179 * function: 166 * zfcp_fsf_req_dismiss_all - dismiss all remaining fsf requests
180 *
181 * purpose:
182 *
183 * returns:
184 */ 167 */
185static void 168int zfcp_fsf_req_dismiss_all(struct zfcp_adapter *adapter)
186zfcp_fsf_req_dismiss(struct zfcp_fsf_req *fsf_req)
187{ 169{
188 fsf_req->status |= ZFCP_STATUS_FSFREQ_DISMISSED; 170 struct zfcp_fsf_req *request, *tmp;
189 zfcp_fsf_req_complete(fsf_req); 171 unsigned long flags;
172 unsigned int i, counter;
173
174 spin_lock_irqsave(&adapter->req_list_lock, flags);
175 atomic_set(&adapter->reqs_active, 0);
176 for (i=0; i<REQUEST_LIST_SIZE; i++) {
177 if (list_empty(&adapter->req_list[i]))
178 continue;
179
180 counter = 0;
181 list_for_each_entry_safe(request, tmp,
182 &adapter->req_list[i], list) {
183 zfcp_fsf_req_dismiss(adapter, request, counter);
184 counter++;
185 }
186 }
187 spin_unlock_irqrestore(&adapter->req_list_lock, flags);
188
189 return 0;
190} 190}
191 191
192/* 192/*
@@ -4592,12 +4592,14 @@ static inline void
4592zfcp_fsf_req_qtcb_init(struct zfcp_fsf_req *fsf_req) 4592zfcp_fsf_req_qtcb_init(struct zfcp_fsf_req *fsf_req)
4593{ 4593{
4594 if (likely(fsf_req->qtcb != NULL)) { 4594 if (likely(fsf_req->qtcb != NULL)) {
4595 fsf_req->qtcb->prefix.req_seq_no = fsf_req->adapter->fsf_req_seq_no; 4595 fsf_req->qtcb->prefix.req_seq_no =
4596 fsf_req->qtcb->prefix.req_id = (unsigned long)fsf_req; 4596 fsf_req->adapter->fsf_req_seq_no;
4597 fsf_req->qtcb->prefix.req_id = fsf_req->req_id;
4597 fsf_req->qtcb->prefix.ulp_info = ZFCP_ULP_INFO_VERSION; 4598 fsf_req->qtcb->prefix.ulp_info = ZFCP_ULP_INFO_VERSION;
4598 fsf_req->qtcb->prefix.qtcb_type = fsf_qtcb_type[fsf_req->fsf_command]; 4599 fsf_req->qtcb->prefix.qtcb_type =
4600 fsf_qtcb_type[fsf_req->fsf_command];
4599 fsf_req->qtcb->prefix.qtcb_version = ZFCP_QTCB_VERSION; 4601 fsf_req->qtcb->prefix.qtcb_version = ZFCP_QTCB_VERSION;
4600 fsf_req->qtcb->header.req_handle = (unsigned long)fsf_req; 4602 fsf_req->qtcb->header.req_handle = fsf_req->req_id;
4601 fsf_req->qtcb->header.fsf_command = fsf_req->fsf_command; 4603 fsf_req->qtcb->header.fsf_command = fsf_req->fsf_command;
4602 } 4604 }
4603} 4605}
@@ -4654,6 +4656,7 @@ zfcp_fsf_req_create(struct zfcp_adapter *adapter, u32 fsf_cmd, int req_flags,
4654{ 4656{
4655 volatile struct qdio_buffer_element *sbale; 4657 volatile struct qdio_buffer_element *sbale;
4656 struct zfcp_fsf_req *fsf_req = NULL; 4658 struct zfcp_fsf_req *fsf_req = NULL;
4659 unsigned long flags;
4657 int ret = 0; 4660 int ret = 0;
4658 struct zfcp_qdio_queue *req_queue = &adapter->request_queue; 4661 struct zfcp_qdio_queue *req_queue = &adapter->request_queue;
4659 4662
@@ -4668,6 +4671,12 @@ zfcp_fsf_req_create(struct zfcp_adapter *adapter, u32 fsf_cmd, int req_flags,
4668 4671
4669 fsf_req->adapter = adapter; 4672 fsf_req->adapter = adapter;
4670 fsf_req->fsf_command = fsf_cmd; 4673 fsf_req->fsf_command = fsf_cmd;
4674 INIT_LIST_HEAD(&fsf_req->list);
4675
4676 /* unique request id */
4677 spin_lock_irqsave(&adapter->req_list_lock, flags);
4678 fsf_req->req_id = adapter->req_no++;
4679 spin_unlock_irqrestore(&adapter->req_list_lock, flags);
4671 4680
4672 zfcp_fsf_req_qtcb_init(fsf_req); 4681 zfcp_fsf_req_qtcb_init(fsf_req);
4673 4682
@@ -4707,7 +4716,7 @@ zfcp_fsf_req_create(struct zfcp_adapter *adapter, u32 fsf_cmd, int req_flags,
4707 sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0); 4716 sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0);
4708 4717
4709 /* setup common SBALE fields */ 4718 /* setup common SBALE fields */
4710 sbale[0].addr = fsf_req; 4719 sbale[0].addr = (void *) fsf_req->req_id;
4711 sbale[0].flags |= SBAL_FLAGS0_COMMAND; 4720 sbale[0].flags |= SBAL_FLAGS0_COMMAND;
4712 if (likely(fsf_req->qtcb != NULL)) { 4721 if (likely(fsf_req->qtcb != NULL)) {
4713 sbale[1].addr = (void *) fsf_req->qtcb; 4722 sbale[1].addr = (void *) fsf_req->qtcb;
@@ -4747,7 +4756,7 @@ zfcp_fsf_req_send(struct zfcp_fsf_req *fsf_req, struct timer_list *timer)
4747 volatile struct qdio_buffer_element *sbale; 4756 volatile struct qdio_buffer_element *sbale;
4748 int inc_seq_no; 4757 int inc_seq_no;
4749 int new_distance_from_int; 4758 int new_distance_from_int;
4750 unsigned long flags; 4759 u64 dbg_tmp[2];
4751 int retval = 0; 4760 int retval = 0;
4752 4761
4753 adapter = fsf_req->adapter; 4762 adapter = fsf_req->adapter;
@@ -4761,10 +4770,10 @@ zfcp_fsf_req_send(struct zfcp_fsf_req *fsf_req, struct timer_list *timer)
4761 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_TRACE, (char *) sbale[1].addr, 4770 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_TRACE, (char *) sbale[1].addr,
4762 sbale[1].length); 4771 sbale[1].length);
4763 4772
4764 /* put allocated FSF request at list tail */ 4773 /* put allocated FSF request into hash table */
4765 spin_lock_irqsave(&adapter->fsf_req_list_lock, flags); 4774 spin_lock(&adapter->req_list_lock);
4766 list_add_tail(&fsf_req->list, &adapter->fsf_req_list_head); 4775 zfcp_reqlist_add(adapter, fsf_req);
4767 spin_unlock_irqrestore(&adapter->fsf_req_list_lock, flags); 4776 spin_unlock(&adapter->req_list_lock);
4768 4777
4769 inc_seq_no = (fsf_req->qtcb != NULL); 4778 inc_seq_no = (fsf_req->qtcb != NULL);
4770 4779
@@ -4803,6 +4812,10 @@ zfcp_fsf_req_send(struct zfcp_fsf_req *fsf_req, struct timer_list *timer)
4803 QDIO_FLAG_SYNC_OUTPUT, 4812 QDIO_FLAG_SYNC_OUTPUT,
4804 0, fsf_req->sbal_first, fsf_req->sbal_number, NULL); 4813 0, fsf_req->sbal_first, fsf_req->sbal_number, NULL);
4805 4814
4815 dbg_tmp[0] = (unsigned long) sbale[0].addr;
4816 dbg_tmp[1] = (u64) retval;
4817 debug_event(adapter->erp_dbf, 4, (void *) dbg_tmp, 16);
4818
4806 if (unlikely(retval)) { 4819 if (unlikely(retval)) {
4807 /* Queues are down..... */ 4820 /* Queues are down..... */
4808 retval = -EIO; 4821 retval = -EIO;
@@ -4812,22 +4825,17 @@ zfcp_fsf_req_send(struct zfcp_fsf_req *fsf_req, struct timer_list *timer)
4812 */ 4825 */
4813 if (timer) 4826 if (timer)
4814 del_timer(timer); 4827 del_timer(timer);
4815 spin_lock_irqsave(&adapter->fsf_req_list_lock, flags); 4828 spin_lock(&adapter->req_list_lock);
4816 list_del(&fsf_req->list); 4829 zfcp_reqlist_remove(adapter, fsf_req->req_id);
4817 spin_unlock_irqrestore(&adapter->fsf_req_list_lock, flags); 4830 spin_unlock(&adapter->req_list_lock);
4818 /* 4831 /* undo changes in request queue made for this request */
4819 * adjust the number of free SBALs in request queue as well as
4820 * position of first one
4821 */
4822 zfcp_qdio_zero_sbals(req_queue->buffer, 4832 zfcp_qdio_zero_sbals(req_queue->buffer,
4823 fsf_req->sbal_first, fsf_req->sbal_number); 4833 fsf_req->sbal_first, fsf_req->sbal_number);
4824 atomic_add(fsf_req->sbal_number, &req_queue->free_count); 4834 atomic_add(fsf_req->sbal_number, &req_queue->free_count);
4825 req_queue->free_index -= fsf_req->sbal_number; /* increase */ 4835 req_queue->free_index -= fsf_req->sbal_number;
4826 req_queue->free_index += QDIO_MAX_BUFFERS_PER_Q; 4836 req_queue->free_index += QDIO_MAX_BUFFERS_PER_Q;
4827 req_queue->free_index %= QDIO_MAX_BUFFERS_PER_Q; /* wrap */ 4837 req_queue->free_index %= QDIO_MAX_BUFFERS_PER_Q; /* wrap */
4828 ZFCP_LOG_DEBUG 4838 zfcp_erp_adapter_reopen(adapter, 0);
4829 ("error: do_QDIO failed. Buffers could not be enqueued "
4830 "to request queue.\n");
4831 } else { 4839 } else {
4832 req_queue->distance_from_int = new_distance_from_int; 4840 req_queue->distance_from_int = new_distance_from_int;
4833 /* 4841 /*
@@ -4843,7 +4851,7 @@ zfcp_fsf_req_send(struct zfcp_fsf_req *fsf_req, struct timer_list *timer)
4843 adapter->fsf_req_seq_no++; 4851 adapter->fsf_req_seq_no++;
4844 4852
4845 /* count FSF requests pending */ 4853 /* count FSF requests pending */
4846 atomic_inc(&adapter->fsf_reqs_active); 4854 atomic_inc(&adapter->reqs_active);
4847 } 4855 }
4848 return retval; 4856 return retval;
4849} 4857}
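
[Editorial sketch] In zfcp_fsf_req_create() above, each request is tagged with a unique number drawn from adapter->req_no under the request-list lock; that number, rather than the request's kernel address, is what gets written into the SBALE and QTCB and later used to look the request up on completion. A minimal sketch of the counter-under-spinlock pattern (field and lock names are illustrative):

	/* Sketch: allocate a unique per-adapter request ID under a spinlock. */
	#include <linux/spinlock.h>

	struct adapter_ids {
		spinlock_t lock;
		unsigned long next_id;
	};

	static unsigned long alloc_req_id(struct adapter_ids *ids)
	{
		unsigned long flags, id;

		spin_lock_irqsave(&ids->lock, flags);
		id = ids->next_id++;	/* each caller gets a distinct tag */
		spin_unlock_irqrestore(&ids->lock, flags);
		return id;
	}
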
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index 49ea5add4ab..dbd9f48e863 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -282,6 +282,37 @@ zfcp_qdio_request_handler(struct ccw_device *ccw_device,
282 return; 282 return;
283} 283}
284 284
285/**
286 * zfcp_qdio_reqid_check - checks for valid reqids or unsolicited status
287 */
288static int zfcp_qdio_reqid_check(struct zfcp_adapter *adapter,
289 unsigned long req_id)
290{
291 struct zfcp_fsf_req *fsf_req;
292 unsigned long flags;
293
294 debug_long_event(adapter->erp_dbf, 4, req_id);
295
296 spin_lock_irqsave(&adapter->req_list_lock, flags);
297 fsf_req = zfcp_reqlist_ismember(adapter, req_id);
298
299 if (!fsf_req) {
300 spin_unlock_irqrestore(&adapter->req_list_lock, flags);
301 ZFCP_LOG_NORMAL("error: unknown request id (%ld).\n", req_id);
302 zfcp_erp_adapter_reopen(adapter, 0);
303 return -EINVAL;
304 }
305
306 zfcp_reqlist_remove(adapter, req_id);
307 atomic_dec(&adapter->reqs_active);
308 spin_unlock_irqrestore(&adapter->req_list_lock, flags);
309
310 /* finish the FSF request */
311 zfcp_fsf_req_complete(fsf_req);
312
313 return 0;
314}
315
285/* 316/*
286 * function: zfcp_qdio_response_handler 317 * function: zfcp_qdio_response_handler
287 * 318 *
@@ -344,7 +375,7 @@ zfcp_qdio_response_handler(struct ccw_device *ccw_device,
344 /* look for QDIO request identifiers in SB */ 375 /* look for QDIO request identifiers in SB */
345 buffere = &buffer->element[buffere_index]; 376 buffere = &buffer->element[buffere_index];
346 retval = zfcp_qdio_reqid_check(adapter, 377 retval = zfcp_qdio_reqid_check(adapter,
347 (void *) buffere->addr); 378 (unsigned long) buffere->addr);
348 379
349 if (retval) { 380 if (retval) {
350 ZFCP_LOG_NORMAL("bug: unexpected inbound " 381 ZFCP_LOG_NORMAL("bug: unexpected inbound "
@@ -415,52 +446,6 @@ zfcp_qdio_response_handler(struct ccw_device *ccw_device,
415 return; 446 return;
416} 447}
417 448
418/*
419 * function: zfcp_qdio_reqid_check
420 *
421 * purpose: checks for valid reqids or unsolicited status
422 *
423 * returns: 0 - valid request id or unsolicited status
424 * !0 - otherwise
425 */
426int
427zfcp_qdio_reqid_check(struct zfcp_adapter *adapter, void *sbale_addr)
428{
429 struct zfcp_fsf_req *fsf_req;
430 unsigned long flags;
431
432 /* invalid (per convention used in this driver) */
433 if (unlikely(!sbale_addr)) {
434 ZFCP_LOG_NORMAL("bug: invalid reqid\n");
435 return -EINVAL;
436 }
437
438 /* valid request id and thus (hopefully :) valid fsf_req address */
439 fsf_req = (struct zfcp_fsf_req *) sbale_addr;
440
441 /* serialize with zfcp_fsf_req_dismiss_all */
442 spin_lock_irqsave(&adapter->fsf_req_list_lock, flags);
443 if (list_empty(&adapter->fsf_req_list_head)) {
444 spin_unlock_irqrestore(&adapter->fsf_req_list_lock, flags);
445 return 0;
446 }
447 list_del(&fsf_req->list);
448 atomic_dec(&adapter->fsf_reqs_active);
449 spin_unlock_irqrestore(&adapter->fsf_req_list_lock, flags);
450
451 if (unlikely(adapter != fsf_req->adapter)) {
452 ZFCP_LOG_NORMAL("bug: invalid reqid (fsf_req=%p, "
453 "fsf_req->adapter=%p, adapter=%p)\n",
454 fsf_req, fsf_req->adapter, adapter);
455 return -EINVAL;
456 }
457
458 /* finish the FSF request */
459 zfcp_fsf_req_complete(fsf_req);
460
461 return 0;
462}
463
464/** 449/**
465 * zfcp_qdio_sbale_get - return pointer to SBALE of qdio_queue 450 * zfcp_qdio_sbale_get - return pointer to SBALE of qdio_queue
466 * @queue: queue from which SBALE should be returned 451 * @queue: queue from which SBALE should be returned
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index 671f4a6a5d1..1bb55086db9 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -30,7 +30,6 @@ static int zfcp_scsi_queuecommand(struct scsi_cmnd *,
30 void (*done) (struct scsi_cmnd *)); 30 void (*done) (struct scsi_cmnd *));
31static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *); 31static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *);
32static int zfcp_scsi_eh_device_reset_handler(struct scsi_cmnd *); 32static int zfcp_scsi_eh_device_reset_handler(struct scsi_cmnd *);
33static int zfcp_scsi_eh_bus_reset_handler(struct scsi_cmnd *);
34static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *); 33static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *);
35static int zfcp_task_management_function(struct zfcp_unit *, u8, 34static int zfcp_task_management_function(struct zfcp_unit *, u8,
36 struct scsi_cmnd *); 35 struct scsi_cmnd *);
@@ -46,30 +45,22 @@ struct zfcp_data zfcp_data = {
46 .scsi_host_template = { 45 .scsi_host_template = {
47 .name = ZFCP_NAME, 46 .name = ZFCP_NAME,
48 .proc_name = "zfcp", 47 .proc_name = "zfcp",
49 .proc_info = NULL,
50 .detect = NULL,
51 .slave_alloc = zfcp_scsi_slave_alloc, 48 .slave_alloc = zfcp_scsi_slave_alloc,
52 .slave_configure = zfcp_scsi_slave_configure, 49 .slave_configure = zfcp_scsi_slave_configure,
53 .slave_destroy = zfcp_scsi_slave_destroy, 50 .slave_destroy = zfcp_scsi_slave_destroy,
54 .queuecommand = zfcp_scsi_queuecommand, 51 .queuecommand = zfcp_scsi_queuecommand,
55 .eh_abort_handler = zfcp_scsi_eh_abort_handler, 52 .eh_abort_handler = zfcp_scsi_eh_abort_handler,
56 .eh_device_reset_handler = zfcp_scsi_eh_device_reset_handler, 53 .eh_device_reset_handler = zfcp_scsi_eh_device_reset_handler,
57 .eh_bus_reset_handler = zfcp_scsi_eh_bus_reset_handler, 54 .eh_bus_reset_handler = zfcp_scsi_eh_host_reset_handler,
58 .eh_host_reset_handler = zfcp_scsi_eh_host_reset_handler, 55 .eh_host_reset_handler = zfcp_scsi_eh_host_reset_handler,
59 .can_queue = 4096, 56 .can_queue = 4096,
60 .this_id = -1, 57 .this_id = -1,
61 /*
62 * FIXME:
63 * one less? can zfcp_create_sbale cope with it?
64 */
65 .sg_tablesize = ZFCP_MAX_SBALES_PER_REQ, 58 .sg_tablesize = ZFCP_MAX_SBALES_PER_REQ,
66 .cmd_per_lun = 1, 59 .cmd_per_lun = 1,
67 .unchecked_isa_dma = 0,
68 .use_clustering = 1, 60 .use_clustering = 1,
69 .sdev_attrs = zfcp_sysfs_sdev_attrs, 61 .sdev_attrs = zfcp_sysfs_sdev_attrs,
70 }, 62 },
71 .driver_version = ZFCP_VERSION, 63 .driver_version = ZFCP_VERSION,
72 /* rest initialised with zeros */
73}; 64};
74 65
75/* Find start of Response Information in FCP response unit*/ 66/* Find start of Response Information in FCP response unit*/
@@ -176,8 +167,14 @@ zfcp_scsi_slave_alloc(struct scsi_device *sdp)
176 return retval; 167 return retval;
177} 168}
178 169
179static void 170/**
180zfcp_scsi_slave_destroy(struct scsi_device *sdpnt) 171 * zfcp_scsi_slave_destroy - called when scsi device is removed
172 *
173 * Remove reference to associated scsi device for an zfcp_unit.
174 * Mark zfcp_unit as failed. The scsi device might be deleted via sysfs
175 * or a scan for this device might have failed.
176 */
177static void zfcp_scsi_slave_destroy(struct scsi_device *sdpnt)
181{ 178{
182 struct zfcp_unit *unit = (struct zfcp_unit *) sdpnt->hostdata; 179 struct zfcp_unit *unit = (struct zfcp_unit *) sdpnt->hostdata;
183 180
@@ -185,6 +182,7 @@ zfcp_scsi_slave_destroy(struct scsi_device *sdpnt)
185 atomic_clear_mask(ZFCP_STATUS_UNIT_REGISTERED, &unit->status); 182 atomic_clear_mask(ZFCP_STATUS_UNIT_REGISTERED, &unit->status);
186 sdpnt->hostdata = NULL; 183 sdpnt->hostdata = NULL;
187 unit->device = NULL; 184 unit->device = NULL;
185 zfcp_erp_unit_failed(unit);
188 zfcp_unit_put(unit); 186 zfcp_unit_put(unit);
189 } else { 187 } else {
190 ZFCP_LOG_NORMAL("bug: no unit associated with SCSI device at " 188 ZFCP_LOG_NORMAL("bug: no unit associated with SCSI device at "
@@ -549,35 +547,38 @@ zfcp_task_management_function(struct zfcp_unit *unit, u8 tm_flags,
549} 547}
550 548
551/** 549/**
552 * zfcp_scsi_eh_bus_reset_handler - reset bus (reopen adapter) 550 * zfcp_scsi_eh_host_reset_handler - handler for host and bus reset
551 *
552 * If ERP is already running it will be stopped.
553 */ 553 */
554int 554int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt)
555zfcp_scsi_eh_bus_reset_handler(struct scsi_cmnd *scpnt)
556{ 555{
557 struct zfcp_unit *unit = (struct zfcp_unit*) scpnt->device->hostdata; 556 struct zfcp_unit *unit;
558 struct zfcp_adapter *adapter = unit->port->adapter; 557 struct zfcp_adapter *adapter;
559 558 unsigned long flags;
560 ZFCP_LOG_NORMAL("bus reset because of problems with "
561 "unit 0x%016Lx\n", unit->fcp_lun);
562 zfcp_erp_adapter_reopen(adapter, 0);
563 zfcp_erp_wait(adapter);
564
565 return SUCCESS;
566}
567 559
568/** 560 unit = (struct zfcp_unit*) scpnt->device->hostdata;
569 * zfcp_scsi_eh_host_reset_handler - reset host (reopen adapter) 561 adapter = unit->port->adapter;
570 */
571int
572zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt)
573{
574 struct zfcp_unit *unit = (struct zfcp_unit*) scpnt->device->hostdata;
575 struct zfcp_adapter *adapter = unit->port->adapter;
576 562
577 ZFCP_LOG_NORMAL("host reset because of problems with " 563 ZFCP_LOG_NORMAL("host/bus reset because of problems with "
578 "unit 0x%016Lx\n", unit->fcp_lun); 564 "unit 0x%016Lx\n", unit->fcp_lun);
579 zfcp_erp_adapter_reopen(adapter, 0); 565
580 zfcp_erp_wait(adapter); 566 write_lock_irqsave(&adapter->erp_lock, flags);
567 if (atomic_test_mask(ZFCP_STATUS_ADAPTER_ERP_PENDING,
568 &adapter->status)) {
569 zfcp_erp_modify_adapter_status(adapter,
570 ZFCP_STATUS_COMMON_UNBLOCKED|ZFCP_STATUS_COMMON_OPEN,
571 ZFCP_CLEAR);
572 zfcp_erp_action_dismiss_adapter(adapter);
573 write_unlock_irqrestore(&adapter->erp_lock, flags);
574 zfcp_fsf_req_dismiss_all(adapter);
575 adapter->fsf_req_seq_no = 0;
576 zfcp_erp_adapter_reopen(adapter, 0);
577 } else {
578 write_unlock_irqrestore(&adapter->erp_lock, flags);
579 zfcp_erp_adapter_reopen(adapter, 0);
580 zfcp_erp_wait(adapter);
581 }
581 582
582 return SUCCESS; 583 return SUCCESS;
583} 584}
diff --git a/drivers/scsi/arm/Kconfig b/drivers/scsi/arm/Kconfig
index 06d7601cdf5..d006a8cb4a7 100644
--- a/drivers/scsi/arm/Kconfig
+++ b/drivers/scsi/arm/Kconfig
@@ -69,6 +69,7 @@ comment "The following drivers are not fully supported"
69config SCSI_CUMANA_1 69config SCSI_CUMANA_1
70 tristate "CumanaSCSI I support (EXPERIMENTAL)" 70 tristate "CumanaSCSI I support (EXPERIMENTAL)"
71 depends on ARCH_ACORN && EXPERIMENTAL && SCSI 71 depends on ARCH_ACORN && EXPERIMENTAL && SCSI
72 select SCSI_SPI_ATTRS
72 help 73 help
73 This enables support for the Cumana SCSI I card. If you have an 74 This enables support for the Cumana SCSI I card. If you have an
74 Acorn system with one of these, say Y. If unsure, say N. 75 Acorn system with one of these, say Y. If unsure, say N.
@@ -76,6 +77,7 @@ config SCSI_CUMANA_1
76config SCSI_ECOSCSI 77config SCSI_ECOSCSI
77 tristate "EcoScsi support (EXPERIMENTAL)" 78 tristate "EcoScsi support (EXPERIMENTAL)"
78 depends on ARCH_ACORN && EXPERIMENTAL && (ARCH_ARC || ARCH_A5K) && SCSI 79 depends on ARCH_ACORN && EXPERIMENTAL && (ARCH_ARC || ARCH_A5K) && SCSI
80 select SCSI_SPI_ATTRS
79 help 81 help
80 This enables support for the EcoSCSI card -- a small card that sits 82 This enables support for the EcoSCSI card -- a small card that sits
81 in the Econet socket. If you have an Acorn system with one of these, 83 in the Econet socket. If you have an Acorn system with one of these,
@@ -84,6 +86,7 @@ config SCSI_ECOSCSI
84config SCSI_OAK1 86config SCSI_OAK1
85 tristate "Oak SCSI support (EXPERIMENTAL)" 87 tristate "Oak SCSI support (EXPERIMENTAL)"
86 depends on ARCH_ACORN && EXPERIMENTAL && SCSI 88 depends on ARCH_ACORN && EXPERIMENTAL && SCSI
89 select SCSI_SPI_ATTRS
87 help 90 help
88 This enables support for the Oak SCSI card. If you have an Acorn 91 This enables support for the Oak SCSI card. If you have an Acorn
89 system with one of these, say Y. If unsure, say N. 92 system with one of these, say Y. If unsure, say N.
diff --git a/drivers/scsi/arm/scsi.h b/drivers/scsi/arm/scsi.h
index 6dd544a5eb5..8c2600ffc6a 100644
--- a/drivers/scsi/arm/scsi.h
+++ b/drivers/scsi/arm/scsi.h
@@ -74,7 +74,7 @@ static inline void init_SCp(Scsi_Cmnd *SCpnt)
74 unsigned long len = 0; 74 unsigned long len = 0;
75 int buf; 75 int buf;
76 76
77 SCpnt->SCp.buffer = (struct scatterlist *) SCpnt->buffer; 77 SCpnt->SCp.buffer = (struct scatterlist *) SCpnt->request_buffer;
78 SCpnt->SCp.buffers_residual = SCpnt->use_sg - 1; 78 SCpnt->SCp.buffers_residual = SCpnt->use_sg - 1;
79 SCpnt->SCp.ptr = (char *) 79 SCpnt->SCp.ptr = (char *)
80 (page_address(SCpnt->SCp.buffer->page) + 80 (page_address(SCpnt->SCp.buffer->page) +
diff --git a/drivers/scsi/ata_piix.c b/drivers/scsi/ata_piix.c
index 19745a31072..2d20caf377f 100644
--- a/drivers/scsi/ata_piix.c
+++ b/drivers/scsi/ata_piix.c
@@ -390,7 +390,8 @@ static struct ata_port_info piix_port_info[] = {
390 /* ich5_sata */ 390 /* ich5_sata */
391 { 391 {
392 .sht = &piix_sht, 392 .sht = &piix_sht,
393 .host_flags = ATA_FLAG_SATA | PIIX_FLAG_CHECKINTR, 393 .host_flags = ATA_FLAG_SATA | PIIX_FLAG_CHECKINTR |
394 PIIX_FLAG_IGNORE_PCS,
394 .pio_mask = 0x1f, /* pio0-4 */ 395 .pio_mask = 0x1f, /* pio0-4 */
395 .mwdma_mask = 0x07, /* mwdma0-2 */ 396 .mwdma_mask = 0x07, /* mwdma0-2 */
396 .udma_mask = 0x7f, /* udma0-6 */ 397 .udma_mask = 0x7f, /* udma0-6 */
@@ -467,6 +468,11 @@ MODULE_LICENSE("GPL");
467MODULE_DEVICE_TABLE(pci, piix_pci_tbl); 468MODULE_DEVICE_TABLE(pci, piix_pci_tbl);
468MODULE_VERSION(DRV_VERSION); 469MODULE_VERSION(DRV_VERSION);
469 470
471static int force_pcs = 0;
472module_param(force_pcs, int, 0444);
473MODULE_PARM_DESC(force_pcs, "force honoring or ignoring PCS to work around "
474 "device mis-detection (0=default, 1=ignore PCS, 2=honor PCS)");
475
470/** 476/**
471 * piix_pata_cbl_detect - Probe host controller cable detect info 477 * piix_pata_cbl_detect - Probe host controller cable detect info
472 * @ap: Port for which cable detect info is desired 478 * @ap: Port for which cable detect info is desired
@@ -531,27 +537,25 @@ static void piix_pata_error_handler(struct ata_port *ap)
531} 537}
532 538
533/** 539/**
534 * piix_sata_prereset - prereset for SATA host controller 540 * piix_sata_present_mask - determine present mask for SATA host controller
535 * @ap: Target port 541 * @ap: Target port
536 * 542 *
537 * Reads and configures SATA PCI device's PCI config register 543 * Reads SATA PCI device's PCI config register Port Configuration
538 * Port Configuration and Status (PCS) to determine port and 544 * and Status (PCS) to determine port and device availability.
539 * device availability. Return -ENODEV to skip reset if no
540 * device is present.
541 * 545 *
542 * LOCKING: 546 * LOCKING:
543 * None (inherited from caller). 547 * None (inherited from caller).
544 * 548 *
545 * RETURNS: 549 * RETURNS:
546 * 0 if device is present, -ENODEV otherwise. 550 * determined present_mask
547 */ 551 */
548static int piix_sata_prereset(struct ata_port *ap) 552static unsigned int piix_sata_present_mask(struct ata_port *ap)
549{ 553{
550 struct pci_dev *pdev = to_pci_dev(ap->host_set->dev); 554 struct pci_dev *pdev = to_pci_dev(ap->host_set->dev);
551 struct piix_host_priv *hpriv = ap->host_set->private_data; 555 struct piix_host_priv *hpriv = ap->host_set->private_data;
552 const unsigned int *map = hpriv->map; 556 const unsigned int *map = hpriv->map;
553 int base = 2 * ap->hard_port_no; 557 int base = 2 * ap->hard_port_no;
554 unsigned int present = 0; 558 unsigned int present_mask = 0;
555 int port, i; 559 int port, i;
556 u16 pcs; 560 u16 pcs;
557 561
@@ -564,24 +568,52 @@ static int piix_sata_prereset(struct ata_port *ap)
564 continue; 568 continue;
565 if ((ap->flags & PIIX_FLAG_IGNORE_PCS) || 569 if ((ap->flags & PIIX_FLAG_IGNORE_PCS) ||
566 (pcs & 1 << (hpriv->map_db->present_shift + port))) 570 (pcs & 1 << (hpriv->map_db->present_shift + port)))
567 present = 1; 571 present_mask |= 1 << i;
568 } 572 }
569 573
570 DPRINTK("ata%u: LEAVE, pcs=0x%x present_mask=0x%x\n", 574 DPRINTK("ata%u: LEAVE, pcs=0x%x present_mask=0x%x\n",
571 ap->id, pcs, present_mask); 575 ap->id, pcs, present_mask);
572 576
573 if (!present) { 577 return present_mask;
574 ata_port_printk(ap, KERN_INFO, "SATA port has no device.\n"); 578}
575 ap->eh_context.i.action &= ~ATA_EH_RESET_MASK; 579
576 return 0; 580/**
581 * piix_sata_softreset - reset SATA host port via ATA SRST
582 * @ap: port to reset
583 * @classes: resulting classes of attached devices
584 *
585 * Reset SATA host port via ATA SRST. On controllers with
586 * reliable PCS present bits, the bits are used to determine
587 * device presence.
588 *
589 * LOCKING:
590 * Kernel thread context (may sleep)
591 *
592 * RETURNS:
593 * 0 on success, -errno otherwise.
594 */
595static int piix_sata_softreset(struct ata_port *ap, unsigned int *classes)
596{
597 unsigned int present_mask;
598 int i, rc;
599
600 present_mask = piix_sata_present_mask(ap);
601
602 rc = ata_std_softreset(ap, classes);
603 if (rc)
604 return rc;
605
606 for (i = 0; i < ATA_MAX_DEVICES; i++) {
607 if (!(present_mask & (1 << i)))
608 classes[i] = ATA_DEV_NONE;
577 } 609 }
578 610
579 return ata_std_prereset(ap); 611 return 0;
580} 612}
581 613
582static void piix_sata_error_handler(struct ata_port *ap) 614static void piix_sata_error_handler(struct ata_port *ap)
583{ 615{
584 ata_bmdma_drive_eh(ap, piix_sata_prereset, ata_std_softreset, NULL, 616 ata_bmdma_drive_eh(ap, ata_std_prereset, piix_sata_softreset, NULL,
585 ata_std_postreset); 617 ata_std_postreset);
586} 618}
587 619
@@ -785,6 +817,7 @@ static int __devinit piix_check_450nx_errata(struct pci_dev *ata_dev)
785} 817}
786 818
787static void __devinit piix_init_pcs(struct pci_dev *pdev, 819static void __devinit piix_init_pcs(struct pci_dev *pdev,
820 struct ata_port_info *pinfo,
788 const struct piix_map_db *map_db) 821 const struct piix_map_db *map_db)
789{ 822{
790 u16 pcs, new_pcs; 823 u16 pcs, new_pcs;
@@ -798,6 +831,18 @@ static void __devinit piix_init_pcs(struct pci_dev *pdev,
798 pci_write_config_word(pdev, ICH5_PCS, new_pcs); 831 pci_write_config_word(pdev, ICH5_PCS, new_pcs);
799 msleep(150); 832 msleep(150);
800 } 833 }
834
835 if (force_pcs == 1) {
836 dev_printk(KERN_INFO, &pdev->dev,
837 "force ignoring PCS (0x%x)\n", new_pcs);
838 pinfo[0].host_flags |= PIIX_FLAG_IGNORE_PCS;
839 pinfo[1].host_flags |= PIIX_FLAG_IGNORE_PCS;
840 } else if (force_pcs == 2) {
841 dev_printk(KERN_INFO, &pdev->dev,
842 "force honoring PCS (0x%x)\n", new_pcs);
843 pinfo[0].host_flags &= ~PIIX_FLAG_IGNORE_PCS;
844 pinfo[1].host_flags &= ~PIIX_FLAG_IGNORE_PCS;
845 }
801} 846}
802 847
803static void __devinit piix_init_sata_map(struct pci_dev *pdev, 848static void __devinit piix_init_sata_map(struct pci_dev *pdev,
@@ -828,6 +873,7 @@ static void __devinit piix_init_sata_map(struct pci_dev *pdev,
828 case IDE: 873 case IDE:
829 WARN_ON((i & 1) || map[i + 1] != IDE); 874 WARN_ON((i & 1) || map[i + 1] != IDE);
830 pinfo[i / 2] = piix_port_info[ich5_pata]; 875 pinfo[i / 2] = piix_port_info[ich5_pata];
876 pinfo[i / 2].private_data = hpriv;
831 i++; 877 i++;
832 printk(" IDE IDE"); 878 printk(" IDE IDE");
833 break; 879 break;
@@ -905,7 +951,8 @@ static int piix_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
905 if (host_flags & ATA_FLAG_SATA) { 951 if (host_flags & ATA_FLAG_SATA) {
906 piix_init_sata_map(pdev, port_info, 952 piix_init_sata_map(pdev, port_info,
907 piix_map_db_table[ent->driver_data]); 953 piix_map_db_table[ent->driver_data]);
908 piix_init_pcs(pdev, piix_map_db_table[ent->driver_data]); 954 piix_init_pcs(pdev, port_info,
955 piix_map_db_table[ent->driver_data]);
909 } 956 }
910 957
911 /* On ICH5, some BIOSen disable the interrupt using the 958 /* On ICH5, some BIOSen disable the interrupt using the
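The ata_piix rework above stops using the PCS register to veto the whole reset: piix_sata_present_mask() now returns a per-device bitmask (treating everything as present when PIIX_FLAG_IGNORE_PCS is set, which the new force_pcs module parameter can force either way), and piix_sata_softreset() performs the standard soft reset first and then downgrades any device outside the mask to ATA_DEV_NONE. A small stand-alone sketch of that masking step; present_mask_from_pcs, MAX_DEVICES and the DEV_* values are illustrative stand-ins, not the driver's symbols, and the real mask is built through the controller's port map rather than bit-per-index:

#include <stdio.h>

#define MAX_DEVICES 2
enum { DEV_NONE = 0, DEV_ATA = 1 };

/* Bit i of the returned mask is set when the controller (or the
 * "ignore PCS" override) says device i is present. */
static unsigned int present_mask_from_pcs(unsigned int pcs, int ignore_pcs)
{
	unsigned int mask = 0;
	int i;

	for (i = 0; i < MAX_DEVICES; i++)
		if (ignore_pcs || (pcs & (1u << i)))
			mask |= 1u << i;
	return mask;
}

int main(void)
{
	int classes[MAX_DEVICES] = { DEV_ATA, DEV_ATA };	/* soft reset saw both */
	unsigned int mask = present_mask_from_pcs(0x1, 0);	/* PCS: only device 0 */
	int i;

	/* Anything the controller reports absent is forced to "no device",
	 * mirroring the loop added to piix_sata_softreset(). */
	for (i = 0; i < MAX_DEVICES; i++)
		if (!(mask & (1u << i)))
			classes[i] = DEV_NONE;

	for (i = 0; i < MAX_DEVICES; i++)
		printf("device %d: %s\n", i, classes[i] == DEV_NONE ? "none" : "ata");
	return 0;
}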
diff --git a/drivers/scsi/esp.c b/drivers/scsi/esp.c
index 98bd22714d0..5630868c1b2 100644
--- a/drivers/scsi/esp.c
+++ b/drivers/scsi/esp.c
@@ -1146,7 +1146,7 @@ static struct sbus_dev sun4_esp_dev;
1146static int __init esp_sun4_probe(struct scsi_host_template *tpnt) 1146static int __init esp_sun4_probe(struct scsi_host_template *tpnt)
1147{ 1147{
1148 if (sun4_esp_physaddr) { 1148 if (sun4_esp_physaddr) {
1149 memset(&sun4_esp_dev, 0, sizeof(esp_dev)); 1149 memset(&sun4_esp_dev, 0, sizeof(sun4_esp_dev));
1150 sun4_esp_dev.reg_addrs[0].phys_addr = sun4_esp_physaddr; 1150 sun4_esp_dev.reg_addrs[0].phys_addr = sun4_esp_physaddr;
1151 sun4_esp_dev.irqs[0] = 4; 1151 sun4_esp_dev.irqs[0] = 4;
1152 sun4_esp_dev.resource[0].start = sun4_esp_physaddr; 1152 sun4_esp_dev.resource[0].start = sun4_esp_physaddr;
@@ -1162,6 +1162,7 @@ static int __init esp_sun4_probe(struct scsi_host_template *tpnt)
1162 1162
1163static int __devexit esp_sun4_remove(void) 1163static int __devexit esp_sun4_remove(void)
1164{ 1164{
1165 struct of_device *dev = &sun4_esp_dev.ofdev;
1165 struct esp *esp = dev_get_drvdata(&dev->dev); 1166 struct esp *esp = dev_get_drvdata(&dev->dev);
1166 1167
1167 return esp_remove_common(esp); 1168 return esp_remove_common(esp);
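The one-line esp.c fix above is the classic sizeof-of-the-wrong-object bug: the memset was sized by an unrelated variable instead of the object actually being cleared, so most of sun4_esp_dev was left uninitialised. A self-contained demonstration of the failure mode and the fix (the structure names here are made up for the demo):

#include <stdio.h>
#include <string.h>

struct small { int a; };
struct big { int a; char pad[60]; };

int main(void)
{
	struct big dev;

	memset(&dev, 0xff, sizeof(dev));	/* pretend stale stack garbage */

	/* Bug pattern: sized by the wrong object, only a few bytes cleared. */
	memset(&dev, 0, sizeof(struct small));
	printf("tail byte after wrong memset: 0x%02x\n",
	       (unsigned char)dev.pad[50]);

	/* Fix mirrors the esp.c change: size the memset by the object itself. */
	memset(&dev, 0, sizeof(dev));
	printf("tail byte after fixed memset: 0x%02x\n",
	       (unsigned char)dev.pad[50]);
	return 0;
}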
diff --git a/drivers/scsi/hptiop.c b/drivers/scsi/hptiop.c
index ab2f8b26790..bcb3444f1dc 100644
--- a/drivers/scsi/hptiop.c
+++ b/drivers/scsi/hptiop.c
@@ -45,10 +45,6 @@ static char driver_name[] = "hptiop";
45static const char driver_name_long[] = "RocketRAID 3xxx SATA Controller driver"; 45static const char driver_name_long[] = "RocketRAID 3xxx SATA Controller driver";
46static const char driver_ver[] = "v1.0 (060426)"; 46static const char driver_ver[] = "v1.0 (060426)";
47 47
48static DEFINE_SPINLOCK(hptiop_hba_list_lock);
49static LIST_HEAD(hptiop_hba_list);
50static int hptiop_cdev_major = -1;
51
52static void hptiop_host_request_callback(struct hptiop_hba *hba, u32 tag); 48static void hptiop_host_request_callback(struct hptiop_hba *hba, u32 tag);
53static void hptiop_iop_request_callback(struct hptiop_hba *hba, u32 tag); 49static void hptiop_iop_request_callback(struct hptiop_hba *hba, u32 tag);
54static void hptiop_message_callback(struct hptiop_hba *hba, u32 msg); 50static void hptiop_message_callback(struct hptiop_hba *hba, u32 msg);
@@ -577,7 +573,7 @@ static int hptiop_reset_hba(struct hptiop_hba *hba)
577 if (atomic_xchg(&hba->resetting, 1) == 0) { 573 if (atomic_xchg(&hba->resetting, 1) == 0) {
578 atomic_inc(&hba->reset_count); 574 atomic_inc(&hba->reset_count);
579 writel(IOPMU_INBOUND_MSG0_RESET, 575 writel(IOPMU_INBOUND_MSG0_RESET,
580 &hba->iop->outbound_msgaddr0); 576 &hba->iop->inbound_msgaddr0);
581 hptiop_pci_posting_flush(hba->iop); 577 hptiop_pci_posting_flush(hba->iop);
582 } 578 }
583 579
@@ -620,532 +616,11 @@ static int hptiop_adjust_disk_queue_depth(struct scsi_device *sdev,
620 return queue_depth; 616 return queue_depth;
621} 617}
622 618
623struct hptiop_getinfo {
624 char __user *buffer;
625 loff_t buflength;
626 loff_t bufoffset;
627 loff_t buffillen;
628 loff_t filpos;
629};
630
631static void hptiop_copy_mem_info(struct hptiop_getinfo *pinfo,
632 char *data, int datalen)
633{
634 if (pinfo->filpos < pinfo->bufoffset) {
635 if (pinfo->filpos + datalen <= pinfo->bufoffset) {
636 pinfo->filpos += datalen;
637 return;
638 } else {
639 data += (pinfo->bufoffset - pinfo->filpos);
640 datalen -= (pinfo->bufoffset - pinfo->filpos);
641 pinfo->filpos = pinfo->bufoffset;
642 }
643 }
644
645 pinfo->filpos += datalen;
646 if (pinfo->buffillen == pinfo->buflength)
647 return;
648
649 if (pinfo->buflength - pinfo->buffillen < datalen)
650 datalen = pinfo->buflength - pinfo->buffillen;
651
652 if (copy_to_user(pinfo->buffer + pinfo->buffillen, data, datalen))
653 return;
654
655 pinfo->buffillen += datalen;
656}
657
658static int hptiop_copy_info(struct hptiop_getinfo *pinfo, char *fmt, ...)
659{
660 va_list args;
661 char buf[128];
662 int len;
663
664 va_start(args, fmt);
665 len = vsnprintf(buf, sizeof(buf), fmt, args);
666 va_end(args);
667 hptiop_copy_mem_info(pinfo, buf, len);
668 return len;
669}
670
671static void hptiop_ioctl_done(struct hpt_ioctl_k *arg)
672{
673 arg->done = NULL;
674 wake_up(&arg->hba->ioctl_wq);
675}
676
677static void hptiop_do_ioctl(struct hpt_ioctl_k *arg)
678{
679 struct hptiop_hba *hba = arg->hba;
680 u32 val;
681 struct hpt_iop_request_ioctl_command __iomem *req;
682 int ioctl_retry = 0;
683
684 dprintk("scsi%d: hptiop_do_ioctl\n", hba->host->host_no);
685
686 /*
687 * check (in + out) buff size from application.
688 * outbuf must be dword aligned.
689 */
690 if (((arg->inbuf_size + 3) & ~3) + arg->outbuf_size >
691 hba->max_request_size
692 - sizeof(struct hpt_iop_request_header)
693 - 4 * sizeof(u32)) {
694 dprintk("scsi%d: ioctl buf size (%d/%d) is too large\n",
695 hba->host->host_no,
696 arg->inbuf_size, arg->outbuf_size);
697 arg->result = HPT_IOCTL_RESULT_FAILED;
698 return;
699 }
700
701retry:
702 spin_lock_irq(hba->host->host_lock);
703
704 val = readl(&hba->iop->inbound_queue);
705 if (val == IOPMU_QUEUE_EMPTY) {
706 spin_unlock_irq(hba->host->host_lock);
707 dprintk("scsi%d: no free req for ioctl\n", hba->host->host_no);
708 arg->result = -1;
709 return;
710 }
711
712 req = (struct hpt_iop_request_ioctl_command __iomem *)
713 ((unsigned long)hba->iop + val);
714
715 writel(HPT_CTL_CODE_LINUX_TO_IOP(arg->ioctl_code),
716 &req->ioctl_code);
717 writel(arg->inbuf_size, &req->inbuf_size);
718 writel(arg->outbuf_size, &req->outbuf_size);
719
720 /*
721 * use the buffer on the IOP local memory first, then copy it
722 * back to host.
723 * the caller's request buffer shoudl be little-endian.
724 */
725 if (arg->inbuf_size)
726 memcpy_toio(req->buf, arg->inbuf, arg->inbuf_size);
727
728 /* correct the controller ID for IOP */
729 if ((arg->ioctl_code == HPT_IOCTL_GET_CHANNEL_INFO ||
730 arg->ioctl_code == HPT_IOCTL_GET_CONTROLLER_INFO_V2 ||
731 arg->ioctl_code == HPT_IOCTL_GET_CONTROLLER_INFO)
732 && arg->inbuf_size >= sizeof(u32))
733 writel(0, req->buf);
734
735 writel(IOP_REQUEST_TYPE_IOCTL_COMMAND, &req->header.type);
736 writel(0, &req->header.flags);
737 writel(offsetof(struct hpt_iop_request_ioctl_command, buf)
738 + arg->inbuf_size, &req->header.size);
739 writel((u32)(unsigned long)arg, &req->header.context);
740 writel(BITS_PER_LONG > 32 ? (u32)((unsigned long)arg>>32) : 0,
741 &req->header.context_hi32);
742 writel(IOP_RESULT_PENDING, &req->header.result);
743
744 arg->result = HPT_IOCTL_RESULT_FAILED;
745 arg->done = hptiop_ioctl_done;
746
747 writel(val, &hba->iop->inbound_queue);
748 hptiop_pci_posting_flush(hba->iop);
749
750 spin_unlock_irq(hba->host->host_lock);
751
752 wait_event_timeout(hba->ioctl_wq, arg->done == NULL, 60 * HZ);
753
754 if (arg->done != NULL) {
755 hptiop_reset_hba(hba);
756 if (ioctl_retry++ < 3)
757 goto retry;
758 }
759
760 dprintk("hpt_iop_ioctl %x result %d\n",
761 arg->ioctl_code, arg->result);
762}
763
764static int __hpt_do_ioctl(struct hptiop_hba *hba, u32 code, void *inbuf,
765 u32 insize, void *outbuf, u32 outsize)
766{
767 struct hpt_ioctl_k arg;
768 arg.hba = hba;
769 arg.ioctl_code = code;
770 arg.inbuf = inbuf;
771 arg.outbuf = outbuf;
772 arg.inbuf_size = insize;
773 arg.outbuf_size = outsize;
774 arg.bytes_returned = NULL;
775 hptiop_do_ioctl(&arg);
776 return arg.result;
777}
778
779static inline int hpt_id_valid(__le32 id)
780{
781 return id != 0 && id != cpu_to_le32(0xffffffff);
782}
783
784static int hptiop_get_controller_info(struct hptiop_hba *hba,
785 struct hpt_controller_info *pinfo)
786{
787 int id = 0;
788
789 return __hpt_do_ioctl(hba, HPT_IOCTL_GET_CONTROLLER_INFO,
790 &id, sizeof(int), pinfo, sizeof(*pinfo));
791}
792
793
794static int hptiop_get_channel_info(struct hptiop_hba *hba, int bus,
795 struct hpt_channel_info *pinfo)
796{
797 u32 ids[2];
798
799 ids[0] = 0;
800 ids[1] = bus;
801 return __hpt_do_ioctl(hba, HPT_IOCTL_GET_CHANNEL_INFO,
802 ids, sizeof(ids), pinfo, sizeof(*pinfo));
803
804}
805
806static int hptiop_get_logical_devices(struct hptiop_hba *hba,
807 __le32 *pids, int maxcount)
808{
809 int i;
810 u32 count = maxcount - 1;
811
812 if (__hpt_do_ioctl(hba, HPT_IOCTL_GET_LOGICAL_DEVICES,
813 &count, sizeof(u32),
814 pids, sizeof(u32) * maxcount))
815 return -1;
816
817 maxcount = le32_to_cpu(pids[0]);
818 for (i = 0; i < maxcount; i++)
819 pids[i] = pids[i+1];
820
821 return maxcount;
822}
823
824static int hptiop_get_device_info_v3(struct hptiop_hba *hba, __le32 id,
825 struct hpt_logical_device_info_v3 *pinfo)
826{
827 return __hpt_do_ioctl(hba, HPT_IOCTL_GET_DEVICE_INFO_V3,
828 &id, sizeof(u32),
829 pinfo, sizeof(*pinfo));
830}
831
832static const char *get_array_status(struct hpt_logical_device_info_v3 *devinfo)
833{
834 static char s[64];
835 u32 flags = le32_to_cpu(devinfo->u.array.flags);
836 u32 trans_prog = le32_to_cpu(devinfo->u.array.transforming_progress);
837 u32 reb_prog = le32_to_cpu(devinfo->u.array.rebuilding_progress);
838
839 if (flags & ARRAY_FLAG_DISABLED)
840 return "Disabled";
841 else if (flags & ARRAY_FLAG_TRANSFORMING)
842 sprintf(s, "Expanding/Migrating %d.%d%%%s%s",
843 trans_prog / 100,
844 trans_prog % 100,
845 (flags & (ARRAY_FLAG_NEEDBUILDING|ARRAY_FLAG_BROKEN))?
846 ", Critical" : "",
847 ((flags & ARRAY_FLAG_NEEDINITIALIZING) &&
848 !(flags & ARRAY_FLAG_REBUILDING) &&
849 !(flags & ARRAY_FLAG_INITIALIZING))?
850 ", Unintialized" : "");
851 else if ((flags & ARRAY_FLAG_BROKEN) &&
852 devinfo->u.array.array_type != AT_RAID6)
853 return "Critical";
854 else if (flags & ARRAY_FLAG_REBUILDING)
855 sprintf(s,
856 (flags & ARRAY_FLAG_NEEDINITIALIZING)?
857 "%sBackground initializing %d.%d%%" :
858 "%sRebuilding %d.%d%%",
859 (flags & ARRAY_FLAG_BROKEN)? "Critical, " : "",
860 reb_prog / 100,
861 reb_prog % 100);
862 else if (flags & ARRAY_FLAG_VERIFYING)
863 sprintf(s, "%sVerifying %d.%d%%",
864 (flags & ARRAY_FLAG_BROKEN)? "Critical, " : "",
865 reb_prog / 100,
866 reb_prog % 100);
867 else if (flags & ARRAY_FLAG_INITIALIZING)
868 sprintf(s, "%sForground initializing %d.%d%%",
869 (flags & ARRAY_FLAG_BROKEN)? "Critical, " : "",
870 reb_prog / 100,
871 reb_prog % 100);
872 else if (flags & ARRAY_FLAG_NEEDTRANSFORM)
873 sprintf(s,"%s%s%s", "Need Expanding/Migrating",
874 (flags & ARRAY_FLAG_BROKEN)? "Critical, " : "",
875 ((flags & ARRAY_FLAG_NEEDINITIALIZING) &&
876 !(flags & ARRAY_FLAG_REBUILDING) &&
877 !(flags & ARRAY_FLAG_INITIALIZING))?
878 ", Unintialized" : "");
879 else if (flags & ARRAY_FLAG_NEEDINITIALIZING &&
880 !(flags & ARRAY_FLAG_REBUILDING) &&
881 !(flags & ARRAY_FLAG_INITIALIZING))
882 sprintf(s,"%sUninitialized",
883 (flags & ARRAY_FLAG_BROKEN)? "Critical, " : "");
884 else if ((flags & ARRAY_FLAG_NEEDBUILDING) ||
885 (flags & ARRAY_FLAG_BROKEN))
886 return "Critical";
887 else
888 return "Normal";
889 return s;
890}
891
892static void hptiop_dump_devinfo(struct hptiop_hba *hba,
893 struct hptiop_getinfo *pinfo, __le32 id, int indent)
894{
895 struct hpt_logical_device_info_v3 devinfo;
896 int i;
897 u64 capacity;
898
899 for (i = 0; i < indent; i++)
900 hptiop_copy_info(pinfo, "\t");
901
902 if (hptiop_get_device_info_v3(hba, id, &devinfo)) {
903 hptiop_copy_info(pinfo, "unknown\n");
904 return;
905 }
906
907 switch (devinfo.type) {
908
909 case LDT_DEVICE: {
910 struct hd_driveid *driveid;
911 u32 flags = le32_to_cpu(devinfo.u.device.flags);
912
913 driveid = (struct hd_driveid *)devinfo.u.device.ident;
914 /* model[] is 40 chars long, but we just want 20 chars here */
915 driveid->model[20] = 0;
916
917 if (indent)
918 if (flags & DEVICE_FLAG_DISABLED)
919 hptiop_copy_info(pinfo,"Missing\n");
920 else
921 hptiop_copy_info(pinfo, "CH%d %s\n",
922 devinfo.u.device.path_id + 1,
923 driveid->model);
924 else {
925 capacity = le64_to_cpu(devinfo.capacity) * 512;
926 do_div(capacity, 1000000);
927 hptiop_copy_info(pinfo,
928 "CH%d %s, %lluMB, %s %s%s%s%s\n",
929 devinfo.u.device.path_id + 1,
930 driveid->model,
931 capacity,
932 (flags & DEVICE_FLAG_DISABLED)?
933 "Disabled" : "Normal",
934 devinfo.u.device.read_ahead_enabled?
935 "[RA]" : "",
936 devinfo.u.device.write_cache_enabled?
937 "[WC]" : "",
938 devinfo.u.device.TCQ_enabled?
939 "[TCQ]" : "",
940 devinfo.u.device.NCQ_enabled?
941 "[NCQ]" : ""
942 );
943 }
944 break;
945 }
946
947 case LDT_ARRAY:
948 if (devinfo.target_id != INVALID_TARGET_ID)
949 hptiop_copy_info(pinfo, "[DISK %d_%d] ",
950 devinfo.vbus_id, devinfo.target_id);
951
952 capacity = le64_to_cpu(devinfo.capacity) * 512;
953 do_div(capacity, 1000000);
954 hptiop_copy_info(pinfo, "%s (%s), %lluMB, %s\n",
955 devinfo.u.array.name,
956 devinfo.u.array.array_type==AT_RAID0? "RAID0" :
957 devinfo.u.array.array_type==AT_RAID1? "RAID1" :
958 devinfo.u.array.array_type==AT_RAID5? "RAID5" :
959 devinfo.u.array.array_type==AT_RAID6? "RAID6" :
960 devinfo.u.array.array_type==AT_JBOD? "JBOD" :
961 "unknown",
962 capacity,
963 get_array_status(&devinfo));
964 for (i = 0; i < devinfo.u.array.ndisk; i++) {
965 if (hpt_id_valid(devinfo.u.array.members[i])) {
966 if (cpu_to_le16(1<<i) &
967 devinfo.u.array.critical_members)
968 hptiop_copy_info(pinfo, "\t*");
969 hptiop_dump_devinfo(hba, pinfo,
970 devinfo.u.array.members[i], indent+1);
971 }
972 else
973 hptiop_copy_info(pinfo, "\tMissing\n");
974 }
975 if (id == devinfo.u.array.transform_source) {
976 hptiop_copy_info(pinfo, "\tExpanding/Migrating to:\n");
977 hptiop_dump_devinfo(hba, pinfo,
978 devinfo.u.array.transform_target, indent+1);
979 }
980 break;
981 }
982}
983
984static ssize_t hptiop_show_version(struct class_device *class_dev, char *buf) 619static ssize_t hptiop_show_version(struct class_device *class_dev, char *buf)
985{ 620{
986 return snprintf(buf, PAGE_SIZE, "%s\n", driver_ver); 621 return snprintf(buf, PAGE_SIZE, "%s\n", driver_ver);
987} 622}
988 623
989static ssize_t hptiop_cdev_read(struct file *filp, char __user *buf,
990 size_t count, loff_t *ppos)
991{
992 struct hptiop_hba *hba = filp->private_data;
993 struct hptiop_getinfo info;
994 int i, j, ndev;
995 struct hpt_controller_info con_info;
996 struct hpt_channel_info chan_info;
997 __le32 ids[32];
998
999 info.buffer = buf;
1000 info.buflength = count;
1001 info.bufoffset = ppos ? *ppos : 0;
1002 info.filpos = 0;
1003 info.buffillen = 0;
1004
1005 if (hptiop_get_controller_info(hba, &con_info))
1006 return -EIO;
1007
1008 for (i = 0; i < con_info.num_buses; i++) {
1009 if (hptiop_get_channel_info(hba, i, &chan_info) == 0) {
1010 if (hpt_id_valid(chan_info.devices[0]))
1011 hptiop_dump_devinfo(hba, &info,
1012 chan_info.devices[0], 0);
1013 if (hpt_id_valid(chan_info.devices[1]))
1014 hptiop_dump_devinfo(hba, &info,
1015 chan_info.devices[1], 0);
1016 }
1017 }
1018
1019 ndev = hptiop_get_logical_devices(hba, ids,
1020 sizeof(ids) / sizeof(ids[0]));
1021
1022 /*
1023 * if hptiop_get_logical_devices fails, ndev==-1 and it just
1024 * output nothing here
1025 */
1026 for (j = 0; j < ndev; j++)
1027 hptiop_dump_devinfo(hba, &info, ids[j], 0);
1028
1029 if (ppos)
1030 *ppos += info.buffillen;
1031
1032 return info.buffillen;
1033}
1034
1035static int hptiop_cdev_ioctl(struct inode *inode, struct file *file,
1036 unsigned int cmd, unsigned long arg)
1037{
1038 struct hptiop_hba *hba = file->private_data;
1039 struct hpt_ioctl_u ioctl_u;
1040 struct hpt_ioctl_k ioctl_k;
1041 u32 bytes_returned;
1042 int err = -EINVAL;
1043
1044 if (copy_from_user(&ioctl_u,
1045 (void __user *)arg, sizeof(struct hpt_ioctl_u)))
1046 return -EINVAL;
1047
1048 if (ioctl_u.magic != HPT_IOCTL_MAGIC)
1049 return -EINVAL;
1050
1051 ioctl_k.ioctl_code = ioctl_u.ioctl_code;
1052 ioctl_k.inbuf = NULL;
1053 ioctl_k.inbuf_size = ioctl_u.inbuf_size;
1054 ioctl_k.outbuf = NULL;
1055 ioctl_k.outbuf_size = ioctl_u.outbuf_size;
1056 ioctl_k.hba = hba;
1057 ioctl_k.bytes_returned = &bytes_returned;
1058
1059 /* verify user buffer */
1060 if ((ioctl_k.inbuf_size && !access_ok(VERIFY_READ,
1061 ioctl_u.inbuf, ioctl_k.inbuf_size)) ||
1062 (ioctl_k.outbuf_size && !access_ok(VERIFY_WRITE,
1063 ioctl_u.outbuf, ioctl_k.outbuf_size)) ||
1064 (ioctl_u.bytes_returned && !access_ok(VERIFY_WRITE,
1065 ioctl_u.bytes_returned, sizeof(u32))) ||
1066 ioctl_k.inbuf_size + ioctl_k.outbuf_size > 0x10000) {
1067
1068 dprintk("scsi%d: got bad user address\n", hba->host->host_no);
1069 return -EINVAL;
1070 }
1071
1072 /* map buffer to kernel. */
1073 if (ioctl_k.inbuf_size) {
1074 ioctl_k.inbuf = kmalloc(ioctl_k.inbuf_size, GFP_KERNEL);
1075 if (!ioctl_k.inbuf) {
1076 dprintk("scsi%d: fail to alloc inbuf\n",
1077 hba->host->host_no);
1078 err = -ENOMEM;
1079 goto err_exit;
1080 }
1081
1082 if (copy_from_user(ioctl_k.inbuf,
1083 ioctl_u.inbuf, ioctl_k.inbuf_size)) {
1084 goto err_exit;
1085 }
1086 }
1087
1088 if (ioctl_k.outbuf_size) {
1089 ioctl_k.outbuf = kmalloc(ioctl_k.outbuf_size, GFP_KERNEL);
1090 if (!ioctl_k.outbuf) {
1091 dprintk("scsi%d: fail to alloc outbuf\n",
1092 hba->host->host_no);
1093 err = -ENOMEM;
1094 goto err_exit;
1095 }
1096 }
1097
1098 hptiop_do_ioctl(&ioctl_k);
1099
1100 if (ioctl_k.result == HPT_IOCTL_RESULT_OK) {
1101 if (ioctl_k.outbuf_size &&
1102 copy_to_user(ioctl_u.outbuf,
1103 ioctl_k.outbuf, ioctl_k.outbuf_size))
1104 goto err_exit;
1105
1106 if (ioctl_u.bytes_returned &&
1107 copy_to_user(ioctl_u.bytes_returned,
1108 &bytes_returned, sizeof(u32)))
1109 goto err_exit;
1110
1111 err = 0;
1112 }
1113
1114err_exit:
1115 kfree(ioctl_k.inbuf);
1116 kfree(ioctl_k.outbuf);
1117
1118 return err;
1119}
1120
1121static int hptiop_cdev_open(struct inode *inode, struct file *file)
1122{
1123 struct hptiop_hba *hba;
1124 unsigned i = 0, minor = iminor(inode);
1125 int ret = -ENODEV;
1126
1127 spin_lock(&hptiop_hba_list_lock);
1128 list_for_each_entry(hba, &hptiop_hba_list, link) {
1129 if (i == minor) {
1130 file->private_data = hba;
1131 ret = 0;
1132 goto out;
1133 }
1134 i++;
1135 }
1136
1137out:
1138 spin_unlock(&hptiop_hba_list_lock);
1139 return ret;
1140}
1141
1142static struct file_operations hptiop_cdev_fops = {
1143 .owner = THIS_MODULE,
1144 .read = hptiop_cdev_read,
1145 .ioctl = hptiop_cdev_ioctl,
1146 .open = hptiop_cdev_open,
1147};
1148
1149static ssize_t hptiop_show_fw_version(struct class_device *class_dev, char *buf) 624static ssize_t hptiop_show_fw_version(struct class_device *class_dev, char *buf)
1150{ 625{
1151 struct Scsi_Host *host = class_to_shost(class_dev); 626 struct Scsi_Host *host = class_to_shost(class_dev);
@@ -1296,19 +771,13 @@ static int __devinit hptiop_probe(struct pci_dev *pcidev,
1296 goto unmap_pci_bar; 771 goto unmap_pci_bar;
1297 } 772 }
1298 773
1299 if (scsi_add_host(host, &pcidev->dev)) {
1300 printk(KERN_ERR "scsi%d: scsi_add_host failed\n",
1301 hba->host->host_no);
1302 goto unmap_pci_bar;
1303 }
1304
1305 pci_set_drvdata(pcidev, host); 774 pci_set_drvdata(pcidev, host);
1306 775
1307 if (request_irq(pcidev->irq, hptiop_intr, IRQF_SHARED, 776 if (request_irq(pcidev->irq, hptiop_intr, IRQF_SHARED,
1308 driver_name, hba)) { 777 driver_name, hba)) {
1309 printk(KERN_ERR "scsi%d: request irq %d failed\n", 778 printk(KERN_ERR "scsi%d: request irq %d failed\n",
1310 hba->host->host_no, pcidev->irq); 779 hba->host->host_no, pcidev->irq);
1311 goto remove_scsi_host; 780 goto unmap_pci_bar;
1312 } 781 }
1313 782
1314 /* Allocate request mem */ 783 /* Allocate request mem */
@@ -1355,9 +824,12 @@ static int __devinit hptiop_probe(struct pci_dev *pcidev,
1355 if (hptiop_initialize_iop(hba)) 824 if (hptiop_initialize_iop(hba))
1356 goto free_request_mem; 825 goto free_request_mem;
1357 826
1358 spin_lock(&hptiop_hba_list_lock); 827 if (scsi_add_host(host, &pcidev->dev)) {
1359 list_add_tail(&hba->link, &hptiop_hba_list); 828 printk(KERN_ERR "scsi%d: scsi_add_host failed\n",
1360 spin_unlock(&hptiop_hba_list_lock); 829 hba->host->host_no);
830 goto free_request_mem;
831 }
832
1361 833
1362 scsi_scan_host(host); 834 scsi_scan_host(host);
1363 835
@@ -1372,9 +844,6 @@ free_request_mem:
1372free_request_irq: 844free_request_irq:
1373 free_irq(hba->pcidev->irq, hba); 845 free_irq(hba->pcidev->irq, hba);
1374 846
1375remove_scsi_host:
1376 scsi_remove_host(host);
1377
1378unmap_pci_bar: 847unmap_pci_bar:
1379 iounmap(hba->iop); 848 iounmap(hba->iop);
1380 849
@@ -1422,10 +891,6 @@ static void hptiop_remove(struct pci_dev *pcidev)
1422 891
1423 scsi_remove_host(host); 892 scsi_remove_host(host);
1424 893
1425 spin_lock(&hptiop_hba_list_lock);
1426 list_del_init(&hba->link);
1427 spin_unlock(&hptiop_hba_list_lock);
1428
1429 hptiop_shutdown(pcidev); 894 hptiop_shutdown(pcidev);
1430 895
1431 free_irq(hba->pcidev->irq, hba); 896 free_irq(hba->pcidev->irq, hba);
@@ -1462,27 +927,12 @@ static struct pci_driver hptiop_pci_driver = {
1462 927
1463static int __init hptiop_module_init(void) 928static int __init hptiop_module_init(void)
1464{ 929{
1465 int error;
1466
1467 printk(KERN_INFO "%s %s\n", driver_name_long, driver_ver); 930 printk(KERN_INFO "%s %s\n", driver_name_long, driver_ver);
1468 931 return pci_register_driver(&hptiop_pci_driver);
1469 error = pci_register_driver(&hptiop_pci_driver);
1470 if (error < 0)
1471 return error;
1472
1473 hptiop_cdev_major = register_chrdev(0, "hptiop", &hptiop_cdev_fops);
1474 if (hptiop_cdev_major < 0) {
1475 printk(KERN_WARNING "unable to register hptiop device.\n");
1476 return hptiop_cdev_major;
1477 }
1478
1479 return 0;
1480} 932}
1481 933
1482static void __exit hptiop_module_exit(void) 934static void __exit hptiop_module_exit(void)
1483{ 935{
1484 dprintk("hptiop_module_exit\n");
1485 unregister_chrdev(hptiop_cdev_major, "hptiop");
1486 pci_unregister_driver(&hptiop_pci_driver); 936 pci_unregister_driver(&hptiop_pci_driver);
1487} 937}
1488 938
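Besides removing the private /dev character-device interface (hptiop_cdev_*, the HBA list and its lock), the hptiop changes above reorder probing so that scsi_add_host() is called only after the request memory and the IOP firmware have been initialised; as a result the error unwind no longer needs a remove_scsi_host label. A generic sketch of that "register last, unwind in reverse order" pattern, using stub functions that are not the hptiop symbols:

#include <stdio.h>

/* Stand-in setup/teardown steps. */
static int map_registers(void)      { puts("map registers");      return 0; }
static int request_irq_line(void)   { puts("request irq");        return 0; }
static int init_firmware(void)      { puts("init firmware");      return 0; }
static int register_host(void)      { puts("register SCSI host"); return 0; }
static void teardown_firmware(void) { puts("teardown firmware"); }
static void free_irq_line(void)     { puts("free irq"); }
static void unmap_registers(void)   { puts("unmap registers"); }

/* The host becomes visible to the midlayer only once the hardware is
 * fully ready, so no error path has to unregister a half-ready host. */
static int probe(void)
{
	if (map_registers())
		return -1;
	if (request_irq_line())
		goto err_unmap;
	if (init_firmware())
		goto err_irq;
	if (register_host())
		goto err_firmware;
	return 0;

err_firmware:
	teardown_firmware();
err_irq:
	free_irq_line();
err_unmap:
	unmap_registers();
	return -1;
}

int main(void)
{
	return probe() ? 1 : 0;
}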
diff --git a/drivers/scsi/ide-scsi.c b/drivers/scsi/ide-scsi.c
index f7b5d7372d2..94d1de55607 100644
--- a/drivers/scsi/ide-scsi.c
+++ b/drivers/scsi/ide-scsi.c
@@ -517,7 +517,7 @@ static ide_startstop_t idescsi_pc_intr (ide_drive_t *drive)
517 /* No more interrupts */ 517 /* No more interrupts */
518 if (test_bit(IDESCSI_LOG_CMD, &scsi->log)) 518 if (test_bit(IDESCSI_LOG_CMD, &scsi->log))
519 printk (KERN_INFO "Packet command completed, %d bytes transferred\n", pc->actually_transferred); 519 printk (KERN_INFO "Packet command completed, %d bytes transferred\n", pc->actually_transferred);
520 local_irq_enable(); 520 local_irq_enable_in_hardirq();
521 if (status.b.check) 521 if (status.b.check)
522 rq->errors++; 522 rq->errors++;
523 idescsi_end_request (drive, 1, 0); 523 idescsi_end_request (drive, 1, 0);
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 848fb2aa4ca..058f094f945 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -43,13 +43,10 @@
43 43
44#include "iscsi_tcp.h" 44#include "iscsi_tcp.h"
45 45
46#define ISCSI_TCP_VERSION "1.0-595"
47
48MODULE_AUTHOR("Dmitry Yusupov <dmitry_yus@yahoo.com>, " 46MODULE_AUTHOR("Dmitry Yusupov <dmitry_yus@yahoo.com>, "
49 "Alex Aizman <itn780@yahoo.com>"); 47 "Alex Aizman <itn780@yahoo.com>");
50MODULE_DESCRIPTION("iSCSI/TCP data-path"); 48MODULE_DESCRIPTION("iSCSI/TCP data-path");
51MODULE_LICENSE("GPL"); 49MODULE_LICENSE("GPL");
52MODULE_VERSION(ISCSI_TCP_VERSION);
53/* #define DEBUG_TCP */ 50/* #define DEBUG_TCP */
54#define DEBUG_ASSERT 51#define DEBUG_ASSERT
55 52
@@ -185,11 +182,19 @@ iscsi_hdr_extract(struct iscsi_tcp_conn *tcp_conn)
185 * must be called with session lock 182 * must be called with session lock
186 */ 183 */
187static void 184static void
188__iscsi_ctask_cleanup(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) 185iscsi_tcp_cleanup_ctask(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
189{ 186{
190 struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; 187 struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
188 struct iscsi_r2t_info *r2t;
191 struct scsi_cmnd *sc; 189 struct scsi_cmnd *sc;
192 190
191 /* flush ctask's r2t queues */
192 while (__kfifo_get(tcp_ctask->r2tqueue, (void*)&r2t, sizeof(void*))) {
193 __kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t,
194 sizeof(void*));
195 debug_scsi("iscsi_tcp_cleanup_ctask pending r2t dropped\n");
196 }
197
193 sc = ctask->sc; 198 sc = ctask->sc;
194 if (unlikely(!sc)) 199 if (unlikely(!sc))
195 return; 200 return;
@@ -374,6 +379,7 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
374 spin_unlock(&session->lock); 379 spin_unlock(&session->lock);
375 return 0; 380 return 0;
376 } 381 }
382
377 rc = __kfifo_get(tcp_ctask->r2tpool.queue, (void*)&r2t, sizeof(void*)); 383 rc = __kfifo_get(tcp_ctask->r2tpool.queue, (void*)&r2t, sizeof(void*));
378 BUG_ON(!rc); 384 BUG_ON(!rc);
379 385
@@ -399,7 +405,7 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
399 tcp_ctask->exp_r2tsn = r2tsn + 1; 405 tcp_ctask->exp_r2tsn = r2tsn + 1;
400 tcp_ctask->xmstate |= XMSTATE_SOL_HDR; 406 tcp_ctask->xmstate |= XMSTATE_SOL_HDR;
401 __kfifo_put(tcp_ctask->r2tqueue, (void*)&r2t, sizeof(void*)); 407 __kfifo_put(tcp_ctask->r2tqueue, (void*)&r2t, sizeof(void*));
402 __kfifo_put(conn->xmitqueue, (void*)&ctask, sizeof(void*)); 408 list_move_tail(&ctask->running, &conn->xmitqueue);
403 409
404 scsi_queue_work(session->host, &conn->xmitwork); 410 scsi_queue_work(session->host, &conn->xmitwork);
405 conn->r2t_pdus_cnt++; 411 conn->r2t_pdus_cnt++;
@@ -477,6 +483,8 @@ iscsi_tcp_hdr_recv(struct iscsi_conn *conn)
477 case ISCSI_OP_SCSI_DATA_IN: 483 case ISCSI_OP_SCSI_DATA_IN:
478 tcp_conn->in.ctask = session->cmds[itt]; 484 tcp_conn->in.ctask = session->cmds[itt];
479 rc = iscsi_data_rsp(conn, tcp_conn->in.ctask); 485 rc = iscsi_data_rsp(conn, tcp_conn->in.ctask);
486 if (rc)
487 return rc;
480 /* fall through */ 488 /* fall through */
481 case ISCSI_OP_SCSI_CMD_RSP: 489 case ISCSI_OP_SCSI_CMD_RSP:
482 tcp_conn->in.ctask = session->cmds[itt]; 490 tcp_conn->in.ctask = session->cmds[itt];
@@ -484,7 +492,7 @@ iscsi_tcp_hdr_recv(struct iscsi_conn *conn)
484 goto copy_hdr; 492 goto copy_hdr;
485 493
486 spin_lock(&session->lock); 494 spin_lock(&session->lock);
487 __iscsi_ctask_cleanup(conn, tcp_conn->in.ctask); 495 iscsi_tcp_cleanup_ctask(conn, tcp_conn->in.ctask);
488 rc = __iscsi_complete_pdu(conn, hdr, NULL, 0); 496 rc = __iscsi_complete_pdu(conn, hdr, NULL, 0);
489 spin_unlock(&session->lock); 497 spin_unlock(&session->lock);
490 break; 498 break;
@@ -500,13 +508,28 @@ iscsi_tcp_hdr_recv(struct iscsi_conn *conn)
500 break; 508 break;
501 case ISCSI_OP_LOGIN_RSP: 509 case ISCSI_OP_LOGIN_RSP:
502 case ISCSI_OP_TEXT_RSP: 510 case ISCSI_OP_TEXT_RSP:
503 case ISCSI_OP_LOGOUT_RSP:
504 case ISCSI_OP_NOOP_IN:
505 case ISCSI_OP_REJECT: 511 case ISCSI_OP_REJECT:
506 case ISCSI_OP_ASYNC_EVENT: 512 case ISCSI_OP_ASYNC_EVENT:
513 /*
514 * It is possible that we could get a PDU with a buffer larger
515 * than 8K, but there are no targets that currently do this.
516 * For now we fail until we find a vendor that needs it
517 */
518 if (DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH <
519 tcp_conn->in.datalen) {
520 printk(KERN_ERR "iscsi_tcp: received buffer of len %u "
521 "but conn buffer is only %u (opcode %0x)\n",
522 tcp_conn->in.datalen,
523 DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH, opcode);
524 rc = ISCSI_ERR_PROTO;
525 break;
526 }
527
507 if (tcp_conn->in.datalen) 528 if (tcp_conn->in.datalen)
508 goto copy_hdr; 529 goto copy_hdr;
509 /* fall through */ 530 /* fall through */
531 case ISCSI_OP_LOGOUT_RSP:
532 case ISCSI_OP_NOOP_IN:
510 case ISCSI_OP_SCSI_TMFUNC_RSP: 533 case ISCSI_OP_SCSI_TMFUNC_RSP:
511 rc = iscsi_complete_pdu(conn, hdr, NULL, 0); 534 rc = iscsi_complete_pdu(conn, hdr, NULL, 0);
512 break; 535 break;
@@ -523,7 +546,7 @@ copy_hdr:
523 * skbs to complete the command then we have to copy the header 546 * skbs to complete the command then we have to copy the header
524 * for later use 547 * for later use
525 */ 548 */
526 if (tcp_conn->in.zero_copy_hdr && tcp_conn->in.copy < 549 if (tcp_conn->in.zero_copy_hdr && tcp_conn->in.copy <=
527 (tcp_conn->in.datalen + tcp_conn->in.padding + 550 (tcp_conn->in.datalen + tcp_conn->in.padding +
528 (conn->datadgst_en ? 4 : 0))) { 551 (conn->datadgst_en ? 4 : 0))) {
529 debug_tcp("Copying header for later use. in.copy %d in.datalen" 552 debug_tcp("Copying header for later use. in.copy %d in.datalen"
@@ -614,9 +637,9 @@ iscsi_ctask_copy(struct iscsi_tcp_conn *tcp_conn, struct iscsi_cmd_task *ctask,
614 * byte counters. 637 * byte counters.
615 **/ 638 **/
616static inline int 639static inline int
617iscsi_tcp_copy(struct iscsi_tcp_conn *tcp_conn) 640iscsi_tcp_copy(struct iscsi_conn *conn)
618{ 641{
619 void *buf = tcp_conn->data; 642 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
620 int buf_size = tcp_conn->in.datalen; 643 int buf_size = tcp_conn->in.datalen;
621 int buf_left = buf_size - tcp_conn->data_copied; 644 int buf_left = buf_size - tcp_conn->data_copied;
622 int size = min(tcp_conn->in.copy, buf_left); 645 int size = min(tcp_conn->in.copy, buf_left);
@@ -627,7 +650,7 @@ iscsi_tcp_copy(struct iscsi_tcp_conn *tcp_conn)
627 BUG_ON(size <= 0); 650 BUG_ON(size <= 0);
628 651
629 rc = skb_copy_bits(tcp_conn->in.skb, tcp_conn->in.offset, 652 rc = skb_copy_bits(tcp_conn->in.skb, tcp_conn->in.offset,
630 (char*)buf + tcp_conn->data_copied, size); 653 (char*)conn->data + tcp_conn->data_copied, size);
631 BUG_ON(rc); 654 BUG_ON(rc);
632 655
633 tcp_conn->in.offset += size; 656 tcp_conn->in.offset += size;
@@ -745,10 +768,11 @@ static int iscsi_scsi_data_in(struct iscsi_conn *conn)
745done: 768done:
746 /* check for non-exceptional status */ 769 /* check for non-exceptional status */
747 if (tcp_conn->in.hdr->flags & ISCSI_FLAG_DATA_STATUS) { 770 if (tcp_conn->in.hdr->flags & ISCSI_FLAG_DATA_STATUS) {
748 debug_scsi("done [sc %lx res %d itt 0x%x]\n", 771 debug_scsi("done [sc %lx res %d itt 0x%x flags 0x%x]\n",
749 (long)sc, sc->result, ctask->itt); 772 (long)sc, sc->result, ctask->itt,
773 tcp_conn->in.hdr->flags);
750 spin_lock(&conn->session->lock); 774 spin_lock(&conn->session->lock);
751 __iscsi_ctask_cleanup(conn, ctask); 775 iscsi_tcp_cleanup_ctask(conn, ctask);
752 __iscsi_complete_pdu(conn, tcp_conn->in.hdr, NULL, 0); 776 __iscsi_complete_pdu(conn, tcp_conn->in.hdr, NULL, 0);
753 spin_unlock(&conn->session->lock); 777 spin_unlock(&conn->session->lock);
754 } 778 }
@@ -769,26 +793,25 @@ iscsi_data_recv(struct iscsi_conn *conn)
769 break; 793 break;
770 case ISCSI_OP_SCSI_CMD_RSP: 794 case ISCSI_OP_SCSI_CMD_RSP:
771 spin_lock(&conn->session->lock); 795 spin_lock(&conn->session->lock);
772 __iscsi_ctask_cleanup(conn, tcp_conn->in.ctask); 796 iscsi_tcp_cleanup_ctask(conn, tcp_conn->in.ctask);
773 spin_unlock(&conn->session->lock); 797 spin_unlock(&conn->session->lock);
774 case ISCSI_OP_TEXT_RSP: 798 case ISCSI_OP_TEXT_RSP:
775 case ISCSI_OP_LOGIN_RSP: 799 case ISCSI_OP_LOGIN_RSP:
776 case ISCSI_OP_NOOP_IN:
777 case ISCSI_OP_ASYNC_EVENT: 800 case ISCSI_OP_ASYNC_EVENT:
778 case ISCSI_OP_REJECT: 801 case ISCSI_OP_REJECT:
779 /* 802 /*
780 * Collect data segment to the connection's data 803 * Collect data segment to the connection's data
781 * placeholder 804 * placeholder
782 */ 805 */
783 if (iscsi_tcp_copy(tcp_conn)) { 806 if (iscsi_tcp_copy(conn)) {
784 rc = -EAGAIN; 807 rc = -EAGAIN;
785 goto exit; 808 goto exit;
786 } 809 }
787 810
788 rc = iscsi_complete_pdu(conn, tcp_conn->in.hdr, tcp_conn->data, 811 rc = iscsi_complete_pdu(conn, tcp_conn->in.hdr, conn->data,
789 tcp_conn->in.datalen); 812 tcp_conn->in.datalen);
790 if (!rc && conn->datadgst_en && opcode != ISCSI_OP_LOGIN_RSP) 813 if (!rc && conn->datadgst_en && opcode != ISCSI_OP_LOGIN_RSP)
791 iscsi_recv_digest_update(tcp_conn, tcp_conn->data, 814 iscsi_recv_digest_update(tcp_conn, conn->data,
792 tcp_conn->in.datalen); 815 tcp_conn->in.datalen);
793 break; 816 break;
794 default: 817 default:
@@ -843,7 +866,7 @@ more:
843 if (rc == -EAGAIN) 866 if (rc == -EAGAIN)
844 goto nomore; 867 goto nomore;
845 else { 868 else {
846 iscsi_conn_failure(conn, rc); 869 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
847 return 0; 870 return 0;
848 } 871 }
849 } 872 }
@@ -897,7 +920,7 @@ more:
897 if (rc) { 920 if (rc) {
898 if (rc == -EAGAIN) 921 if (rc == -EAGAIN)
899 goto again; 922 goto again;
900 iscsi_conn_failure(conn, rc); 923 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
901 return 0; 924 return 0;
902 } 925 }
903 tcp_conn->in.copy -= tcp_conn->in.padding; 926 tcp_conn->in.copy -= tcp_conn->in.padding;
@@ -1028,9 +1051,8 @@ iscsi_conn_set_callbacks(struct iscsi_conn *conn)
1028} 1051}
1029 1052
1030static void 1053static void
1031iscsi_conn_restore_callbacks(struct iscsi_conn *conn) 1054iscsi_conn_restore_callbacks(struct iscsi_tcp_conn *tcp_conn)
1032{ 1055{
1033 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
1034 struct sock *sk = tcp_conn->sock->sk; 1056 struct sock *sk = tcp_conn->sock->sk;
1035 1057
1036 /* restore socket callbacks, see also: iscsi_conn_set_callbacks() */ 1058 /* restore socket callbacks, see also: iscsi_conn_set_callbacks() */
@@ -1308,7 +1330,7 @@ iscsi_tcp_cmd_init(struct iscsi_cmd_task *ctask)
1308 ctask->imm_count - 1330 ctask->imm_count -
1309 ctask->unsol_count; 1331 ctask->unsol_count;
1310 1332
1311 debug_scsi("cmd [itt %x total %d imm %d imm_data %d " 1333 debug_scsi("cmd [itt 0x%x total %d imm %d imm_data %d "
1312 "r2t_data %d]\n", 1334 "r2t_data %d]\n",
1313 ctask->itt, ctask->total_length, ctask->imm_count, 1335 ctask->itt, ctask->total_length, ctask->imm_count,
1314 ctask->unsol_count, tcp_ctask->r2t_data_count); 1336 ctask->unsol_count, tcp_ctask->r2t_data_count);
@@ -1636,7 +1658,7 @@ handle_xmstate_sol_data(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
1636 } 1658 }
1637solicit_again: 1659solicit_again:
1638 /* 1660 /*
1639 * send Data-Out whitnin this R2T sequence. 1661 * send Data-Out within this R2T sequence.
1640 */ 1662 */
1641 if (!r2t->data_count) 1663 if (!r2t->data_count)
1642 goto data_out_done; 1664 goto data_out_done;
@@ -1731,7 +1753,7 @@ handle_xmstate_w_pad(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
1731 struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; 1753 struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
1732 struct iscsi_tcp_conn *tcp_conn = conn->dd_data; 1754 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
1733 struct iscsi_data_task *dtask = tcp_ctask->dtask; 1755 struct iscsi_data_task *dtask = tcp_ctask->dtask;
1734 int sent, rc; 1756 int sent = 0, rc;
1735 1757
1736 tcp_ctask->xmstate &= ~XMSTATE_W_PAD; 1758 tcp_ctask->xmstate &= ~XMSTATE_W_PAD;
1737 iscsi_buf_init_iov(&tcp_ctask->sendbuf, (char*)&tcp_ctask->pad, 1759 iscsi_buf_init_iov(&tcp_ctask->sendbuf, (char*)&tcp_ctask->pad,
@@ -1900,27 +1922,32 @@ iscsi_tcp_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
1900 tcp_conn->in_progress = IN_PROGRESS_WAIT_HEADER; 1922 tcp_conn->in_progress = IN_PROGRESS_WAIT_HEADER;
1901 /* initial operational parameters */ 1923 /* initial operational parameters */
1902 tcp_conn->hdr_size = sizeof(struct iscsi_hdr); 1924 tcp_conn->hdr_size = sizeof(struct iscsi_hdr);
1903 tcp_conn->data_size = DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH;
1904
1905 /* allocate initial PDU receive place holder */
1906 if (tcp_conn->data_size <= PAGE_SIZE)
1907 tcp_conn->data = kmalloc(tcp_conn->data_size, GFP_KERNEL);
1908 else
1909 tcp_conn->data = (void*)__get_free_pages(GFP_KERNEL,
1910 get_order(tcp_conn->data_size));
1911 if (!tcp_conn->data)
1912 goto max_recv_dlenght_alloc_fail;
1913 1925
1914 return cls_conn; 1926 return cls_conn;
1915 1927
1916max_recv_dlenght_alloc_fail:
1917 kfree(tcp_conn);
1918tcp_conn_alloc_fail: 1928tcp_conn_alloc_fail:
1919 iscsi_conn_teardown(cls_conn); 1929 iscsi_conn_teardown(cls_conn);
1920 return NULL; 1930 return NULL;
1921} 1931}
1922 1932
1923static void 1933static void
1934iscsi_tcp_release_conn(struct iscsi_conn *conn)
1935{
1936 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
1937
1938 if (!tcp_conn->sock)
1939 return;
1940
1941 sock_hold(tcp_conn->sock->sk);
1942 iscsi_conn_restore_callbacks(tcp_conn);
1943 sock_put(tcp_conn->sock->sk);
1944
1945 sock_release(tcp_conn->sock);
1946 tcp_conn->sock = NULL;
1947 conn->recv_lock = NULL;
1948}
1949
1950static void
1924iscsi_tcp_conn_destroy(struct iscsi_cls_conn *cls_conn) 1951iscsi_tcp_conn_destroy(struct iscsi_cls_conn *cls_conn)
1925{ 1952{
1926 struct iscsi_conn *conn = cls_conn->dd_data; 1953 struct iscsi_conn *conn = cls_conn->dd_data;
@@ -1930,6 +1957,7 @@ iscsi_tcp_conn_destroy(struct iscsi_cls_conn *cls_conn)
1930 if (conn->hdrdgst_en || conn->datadgst_en) 1957 if (conn->hdrdgst_en || conn->datadgst_en)
1931 digest = 1; 1958 digest = 1;
1932 1959
1960 iscsi_tcp_release_conn(conn);
1933 iscsi_conn_teardown(cls_conn); 1961 iscsi_conn_teardown(cls_conn);
1934 1962
1935 /* now free tcp_conn */ 1963 /* now free tcp_conn */
@@ -1944,15 +1972,18 @@ iscsi_tcp_conn_destroy(struct iscsi_cls_conn *cls_conn)
1944 crypto_free_tfm(tcp_conn->data_rx_tfm); 1972 crypto_free_tfm(tcp_conn->data_rx_tfm);
1945 } 1973 }
1946 1974
1947 /* free conn->data, size = MaxRecvDataSegmentLength */
1948 if (tcp_conn->data_size <= PAGE_SIZE)
1949 kfree(tcp_conn->data);
1950 else
1951 free_pages((unsigned long)tcp_conn->data,
1952 get_order(tcp_conn->data_size));
1953 kfree(tcp_conn); 1975 kfree(tcp_conn);
1954} 1976}
1955 1977
1978static void
1979iscsi_tcp_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
1980{
1981 struct iscsi_conn *conn = cls_conn->dd_data;
1982
1983 iscsi_conn_stop(cls_conn, flag);
1984 iscsi_tcp_release_conn(conn);
1985}
1986
1956static int 1987static int
1957iscsi_tcp_conn_bind(struct iscsi_cls_session *cls_session, 1988iscsi_tcp_conn_bind(struct iscsi_cls_session *cls_session,
1958 struct iscsi_cls_conn *cls_conn, uint64_t transport_eph, 1989 struct iscsi_cls_conn *cls_conn, uint64_t transport_eph,
@@ -2001,52 +2032,6 @@ iscsi_tcp_conn_bind(struct iscsi_cls_session *cls_session,
2001 return 0; 2032 return 0;
2002} 2033}
2003 2034
2004static void
2005iscsi_tcp_cleanup_ctask(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
2006{
2007 struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
2008 struct iscsi_r2t_info *r2t;
2009
2010 /* flush ctask's r2t queues */
2011 while (__kfifo_get(tcp_ctask->r2tqueue, (void*)&r2t, sizeof(void*)))
2012 __kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t,
2013 sizeof(void*));
2014
2015 __iscsi_ctask_cleanup(conn, ctask);
2016}
2017
2018static void
2019iscsi_tcp_suspend_conn_rx(struct iscsi_conn *conn)
2020{
2021 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
2022 struct sock *sk;
2023
2024 if (!tcp_conn->sock)
2025 return;
2026
2027 sk = tcp_conn->sock->sk;
2028 write_lock_bh(&sk->sk_callback_lock);
2029 set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
2030 write_unlock_bh(&sk->sk_callback_lock);
2031}
2032
2033static void
2034iscsi_tcp_terminate_conn(struct iscsi_conn *conn)
2035{
2036 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
2037
2038 if (!tcp_conn->sock)
2039 return;
2040
2041 sock_hold(tcp_conn->sock->sk);
2042 iscsi_conn_restore_callbacks(conn);
2043 sock_put(tcp_conn->sock->sk);
2044
2045 sock_release(tcp_conn->sock);
2046 tcp_conn->sock = NULL;
2047 conn->recv_lock = NULL;
2048}
2049
2050/* called with host lock */ 2035/* called with host lock */
2051static void 2036static void
2052iscsi_tcp_mgmt_init(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask, 2037iscsi_tcp_mgmt_init(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask,
@@ -2057,6 +2042,7 @@ iscsi_tcp_mgmt_init(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask,
2057 iscsi_buf_init_iov(&tcp_mtask->headbuf, (char*)mtask->hdr, 2042 iscsi_buf_init_iov(&tcp_mtask->headbuf, (char*)mtask->hdr,
2058 sizeof(struct iscsi_hdr)); 2043 sizeof(struct iscsi_hdr));
2059 tcp_mtask->xmstate = XMSTATE_IMM_HDR; 2044 tcp_mtask->xmstate = XMSTATE_IMM_HDR;
2045 tcp_mtask->sent = 0;
2060 2046
2061 if (mtask->data_count) 2047 if (mtask->data_count)
2062 iscsi_buf_init_iov(&tcp_mtask->sendbuf, (char*)mtask->data, 2048 iscsi_buf_init_iov(&tcp_mtask->sendbuf, (char*)mtask->data,
@@ -2138,39 +2124,6 @@ iscsi_conn_set_param(struct iscsi_cls_conn *cls_conn, enum iscsi_param param,
2138 int value; 2124 int value;
2139 2125
2140 switch(param) { 2126 switch(param) {
2141 case ISCSI_PARAM_MAX_RECV_DLENGTH: {
2142 char *saveptr = tcp_conn->data;
2143 gfp_t flags = GFP_KERNEL;
2144
2145 sscanf(buf, "%d", &value);
2146 if (tcp_conn->data_size >= value) {
2147 iscsi_set_param(cls_conn, param, buf, buflen);
2148 break;
2149 }
2150
2151 spin_lock_bh(&session->lock);
2152 if (conn->stop_stage == STOP_CONN_RECOVER)
2153 flags = GFP_ATOMIC;
2154 spin_unlock_bh(&session->lock);
2155
2156 if (value <= PAGE_SIZE)
2157 tcp_conn->data = kmalloc(value, flags);
2158 else
2159 tcp_conn->data = (void*)__get_free_pages(flags,
2160 get_order(value));
2161 if (tcp_conn->data == NULL) {
2162 tcp_conn->data = saveptr;
2163 return -ENOMEM;
2164 }
2165 if (tcp_conn->data_size <= PAGE_SIZE)
2166 kfree(saveptr);
2167 else
2168 free_pages((unsigned long)saveptr,
2169 get_order(tcp_conn->data_size));
2170 iscsi_set_param(cls_conn, param, buf, buflen);
2171 tcp_conn->data_size = value;
2172 break;
2173 }
2174 case ISCSI_PARAM_HDRDGST_EN: 2127 case ISCSI_PARAM_HDRDGST_EN:
2175 iscsi_set_param(cls_conn, param, buf, buflen); 2128 iscsi_set_param(cls_conn, param, buf, buflen);
2176 tcp_conn->hdr_size = sizeof(struct iscsi_hdr); 2129 tcp_conn->hdr_size = sizeof(struct iscsi_hdr);
@@ -2361,8 +2314,7 @@ static void iscsi_tcp_session_destroy(struct iscsi_cls_session *cls_session)
2361} 2314}
2362 2315
2363static struct scsi_host_template iscsi_sht = { 2316static struct scsi_host_template iscsi_sht = {
2364 .name = "iSCSI Initiator over TCP/IP, v" 2317 .name = "iSCSI Initiator over TCP/IP",
2365 ISCSI_TCP_VERSION,
2366 .queuecommand = iscsi_queuecommand, 2318 .queuecommand = iscsi_queuecommand,
2367 .change_queue_depth = iscsi_change_queue_depth, 2319 .change_queue_depth = iscsi_change_queue_depth,
2368 .can_queue = ISCSI_XMIT_CMDS_MAX - 1, 2320 .can_queue = ISCSI_XMIT_CMDS_MAX - 1,
@@ -2414,10 +2366,7 @@ static struct iscsi_transport iscsi_tcp_transport = {
2414 .get_conn_param = iscsi_tcp_conn_get_param, 2366 .get_conn_param = iscsi_tcp_conn_get_param,
2415 .get_session_param = iscsi_session_get_param, 2367 .get_session_param = iscsi_session_get_param,
2416 .start_conn = iscsi_conn_start, 2368 .start_conn = iscsi_conn_start,
2417 .stop_conn = iscsi_conn_stop, 2369 .stop_conn = iscsi_tcp_conn_stop,
2418 /* these are called as part of conn recovery */
2419 .suspend_conn_recv = iscsi_tcp_suspend_conn_rx,
2420 .terminate_conn = iscsi_tcp_terminate_conn,
2421 /* IO */ 2370 /* IO */
2422 .send_pdu = iscsi_conn_send_pdu, 2371 .send_pdu = iscsi_conn_send_pdu,
2423 .get_stats = iscsi_conn_get_stats, 2372 .get_stats = iscsi_conn_get_stats,
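A central piece of the iscsi_tcp changes above is that command-task cleanup (now iscsi_tcp_cleanup_ctask, moved next to the header-receive path) first drains the task's pending R2T queue back into the connection's R2T pool before completing the command, so R2Ts still queued when a command is torn down are returned rather than leaked. A tiny stand-alone model of that drain step; the fixed-size FIFO below is a demo structure with no overflow checking, not the kernel kfifo API:

#include <stdio.h>

#define QLEN 8

struct fifo {
	int item[QLEN];
	int head, tail;
};

static int fifo_get(struct fifo *f, int *out)
{
	if (f->head == f->tail)
		return 0;		/* empty */
	*out = f->item[f->head++ % QLEN];
	return 1;
}

static void fifo_put(struct fifo *f, int v)
{
	f->item[f->tail++ % QLEN] = v;
}

int main(void)
{
	struct fifo pool = { {0}, 0, 0 }, pending = { {0}, 0, 0 };
	int r2t;

	/* Two R2Ts were taken from the pool and queued on the task. */
	fifo_put(&pending, 1);
	fifo_put(&pending, 2);

	/* Cleanup mirrors iscsi_tcp_cleanup_ctask(): every pending R2T is
	 * handed back to the pool before the task is completed. */
	while (fifo_get(&pending, &r2t)) {
		fifo_put(&pool, r2t);
		printf("pending r2t %d dropped back to the pool\n", r2t);
	}
	return 0;
}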
diff --git a/drivers/scsi/iscsi_tcp.h b/drivers/scsi/iscsi_tcp.h
index 808302832e6..6a4ee704e46 100644
--- a/drivers/scsi/iscsi_tcp.h
+++ b/drivers/scsi/iscsi_tcp.h
@@ -78,8 +78,6 @@ struct iscsi_tcp_conn {
78 char hdrext[4*sizeof(__u16) + 78 char hdrext[4*sizeof(__u16) +
79 sizeof(__u32)]; 79 sizeof(__u32)];
80 int data_copied; 80 int data_copied;
81 char *data; /* data placeholder */
82 int data_size; /* actual recv_dlength */
83 int stop_stage; /* conn_stop() flag: * 81 int stop_stage; /* conn_stop() flag: *
84 * stop to recover, * 82 * stop to recover, *
85 * stop to terminate */ 83 * stop to terminate */
diff --git a/drivers/scsi/libata-core.c b/drivers/scsi/libata-core.c
index 386e5f21e19..73dd6c8deed 100644
--- a/drivers/scsi/libata-core.c
+++ b/drivers/scsi/libata-core.c
@@ -2746,7 +2746,7 @@ int sata_std_hardreset(struct ata_port *ap, unsigned int *class)
2746 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol))) 2746 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
2747 return rc; 2747 return rc;
2748 2748
2749 scontrol = (scontrol & 0x0f0) | 0x302; 2749 scontrol = (scontrol & 0x0f0) | 0x304;
2750 2750
2751 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol))) 2751 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
2752 return rc; 2752 return rc;
@@ -5185,28 +5185,6 @@ void ata_host_stop (struct ata_host_set *host_set)
5185 iounmap(host_set->mmio_base); 5185 iounmap(host_set->mmio_base);
5186} 5186}
5187 5187
5188
5189/**
5190 * ata_host_remove - Unregister SCSI host structure with upper layers
5191 * @ap: Port to unregister
5192 * @do_unregister: 1 if we fully unregister, 0 to just stop the port
5193 *
5194 * LOCKING:
5195 * Inherited from caller.
5196 */
5197
5198static void ata_host_remove(struct ata_port *ap, unsigned int do_unregister)
5199{
5200 struct Scsi_Host *sh = ap->host;
5201
5202 DPRINTK("ENTER\n");
5203
5204 if (do_unregister)
5205 scsi_remove_host(sh);
5206
5207 ap->ops->port_stop(ap);
5208}
5209
5210/** 5188/**
5211 * ata_dev_init - Initialize an ata_device structure 5189 * ata_dev_init - Initialize an ata_device structure
5212 * @dev: Device structure to initialize 5190 * @dev: Device structure to initialize
@@ -5532,8 +5510,11 @@ int ata_device_add(const struct ata_probe_ent *ent)
5532 5510
5533err_out: 5511err_out:
5534 for (i = 0; i < count; i++) { 5512 for (i = 0; i < count; i++) {
5535 ata_host_remove(host_set->ports[i], 1); 5513 struct ata_port *ap = host_set->ports[i];
5536 scsi_host_put(host_set->ports[i]->host); 5514 if (ap) {
5515 ap->ops->port_stop(ap);
5516 scsi_host_put(ap->host);
5517 }
5537 } 5518 }
5538err_free_ret: 5519err_free_ret:
5539 kfree(host_set); 5520 kfree(host_set);
@@ -5558,7 +5539,7 @@ void ata_port_detach(struct ata_port *ap)
5558 int i; 5539 int i;
5559 5540
5560 if (!ap->ops->error_handler) 5541 if (!ap->ops->error_handler)
5561 return; 5542 goto skip_eh;
5562 5543
5563 /* tell EH we're leaving & flush EH */ 5544 /* tell EH we're leaving & flush EH */
5564 spin_lock_irqsave(ap->lock, flags); 5545 spin_lock_irqsave(ap->lock, flags);
@@ -5594,6 +5575,7 @@ void ata_port_detach(struct ata_port *ap)
5594 cancel_delayed_work(&ap->hotplug_task); 5575 cancel_delayed_work(&ap->hotplug_task);
5595 flush_workqueue(ata_aux_wq); 5576 flush_workqueue(ata_aux_wq);
5596 5577
5578 skip_eh:
5597 /* remove the associated SCSI host */ 5579 /* remove the associated SCSI host */
5598 scsi_remove_host(ap->host); 5580 scsi_remove_host(ap->host);
5599} 5581}
@@ -5662,7 +5644,7 @@ int ata_scsi_release(struct Scsi_Host *host)
5662 DPRINTK("ENTER\n"); 5644 DPRINTK("ENTER\n");
5663 5645
5664 ap->ops->port_disable(ap); 5646 ap->ops->port_disable(ap);
5665 ata_host_remove(ap, 0); 5647 ap->ops->port_stop(ap);
5666 5648
5667 DPRINTK("EXIT\n"); 5649 DPRINTK("EXIT\n");
5668 return 1; 5650 return 1;
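With ata_host_remove() gone, the libata-core error path above stops and releases each port directly, skipping slots that were never allocated, and ata_port_detach() now jumps straight to SCSI host removal (skip_eh) when the port has no new-style error handler. A small sketch of that NULL-checked per-port unwind; port_stop, port_put and the allocation loop are illustrative, not libata calls:

#include <stdio.h>
#include <stdlib.h>

struct port {
	int id;
};

static void port_stop(struct port *p) { printf("stop port %d\n", p->id); }
static void port_put(struct port *p)  { printf("put port %d\n", p->id); free(p); }

int main(void)
{
	struct port *ports[4] = { NULL, NULL, NULL, NULL };
	int count = 0, i;

	/* Only two of the four slots get set up before probing "fails". */
	for (i = 0; i < 2; i++) {
		ports[i] = malloc(sizeof(*ports[i]));
		if (!ports[i])
			break;
		ports[i]->id = i;
		count = i + 1;
	}

	/* Error unwind mirrors the reworked err_out loop: only ports that
	 * were actually created are stopped and released. */
	for (i = 0; i < count; i++) {
		if (ports[i]) {
			port_stop(ports[i]);
			port_put(ports[i]);
		}
	}
	return 0;
}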
diff --git a/drivers/scsi/libata-scsi.c b/drivers/scsi/libata-scsi.c
index 7ced41ecde8..e92c31d698f 100644
--- a/drivers/scsi/libata-scsi.c
+++ b/drivers/scsi/libata-scsi.c
@@ -2353,6 +2353,19 @@ static void atapi_qc_complete(struct ata_queued_cmd *qc)
2353 ata_gen_ata_desc_sense(qc); 2353 ata_gen_ata_desc_sense(qc);
2354 } 2354 }
2355 2355
2356 /* SCSI EH automatically locks door if sdev->locked is
2357 * set. Sometimes door lock request continues to
2358 * fail, for example, when no media is present. This
2359 * creates a loop - SCSI EH issues door lock which
2360 * fails and gets invoked again to acquire sense data
2361 * for the failed command.
2362 *
2363 * If door lock fails, always clear sdev->locked to
2364 * avoid this infinite loop.
2365 */
2366 if (qc->cdb[0] == ALLOW_MEDIUM_REMOVAL)
2367 qc->dev->sdev->locked = 0;
2368
2356 qc->scsicmd->result = SAM_STAT_CHECK_CONDITION; 2369 qc->scsicmd->result = SAM_STAT_CHECK_CONDITION;
2357 qc->scsidone(cmd); 2370 qc->scsidone(cmd);
2358 ata_qc_free(qc); 2371 ata_qc_free(qc);
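The block comment added above explains why atapi_qc_complete() clears sdev->locked when an ALLOW_MEDIUM_REMOVAL (door lock) command fails: SCSI error handling re-locks the door for any device marked locked, so a lock that keeps failing (no media, for instance) would otherwise bounce between the failure and EH forever. A minimal sketch of breaking that loop by clearing the flag on failure; struct device and complete_failed_cmd are stand-ins, and only the ALLOW_MEDIUM_REMOVAL opcode name comes from the hunk above (its value here is the standard SCSI PREVENT/ALLOW opcode):

#include <stdio.h>
#include <stdbool.h>

#define ALLOW_MEDIUM_REMOVAL 0x1e	/* standard SCSI PREVENT/ALLOW opcode */

struct device {
	bool locked;		/* EH re-issues the door lock while this is set */
};

/* Completion path for a failed command: a failed door lock must clear
 * the flag that makes error handling retry it, or EH loops forever. */
static void complete_failed_cmd(struct device *dev, unsigned char opcode)
{
	if (opcode == ALLOW_MEDIUM_REMOVAL)
		dev->locked = false;
}

int main(void)
{
	struct device dev = { .locked = true };

	complete_failed_cmd(&dev, ALLOW_MEDIUM_REMOVAL);
	printf("locked after failed door lock: %s\n", dev.locked ? "yes" : "no");
	return 0;
}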
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 7e6e031cc41..5884cd26d53 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -189,6 +189,7 @@ static void iscsi_complete_command(struct iscsi_session *session,
 {
         struct scsi_cmnd *sc = ctask->sc;
 
+        ctask->state = ISCSI_TASK_COMPLETED;
         ctask->sc = NULL;
         list_del_init(&ctask->running);
         __kfifo_put(session->cmdpool.queue, (void*)&ctask, sizeof(void*));
@@ -275,6 +276,25 @@ out:
         return rc;
 }
 
+static void iscsi_tmf_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
+{
+        struct iscsi_tm_rsp *tmf = (struct iscsi_tm_rsp *)hdr;
+
+        conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
+        conn->tmfrsp_pdus_cnt++;
+
+        if (conn->tmabort_state != TMABORT_INITIAL)
+                return;
+
+        if (tmf->response == ISCSI_TMF_RSP_COMPLETE)
+                conn->tmabort_state = TMABORT_SUCCESS;
+        else if (tmf->response == ISCSI_TMF_RSP_NO_TASK)
+                conn->tmabort_state = TMABORT_NOT_FOUND;
+        else
+                conn->tmabort_state = TMABORT_FAILED;
+        wake_up(&conn->ehwait);
+}
+
 /**
  * __iscsi_complete_pdu - complete pdu
  * @conn: iscsi conn
@@ -340,6 +360,10 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
 
                 switch(opcode) {
                 case ISCSI_OP_LOGOUT_RSP:
+                        if (datalen) {
+                                rc = ISCSI_ERR_PROTO;
+                                break;
+                        }
                         conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
                         /* fall through */
                 case ISCSI_OP_LOGIN_RSP:
@@ -348,7 +372,8 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
                          * login related PDU's exp_statsn is handled in
                          * userspace
                          */
-                        rc = iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen);
+                        if (iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen))
+                                rc = ISCSI_ERR_CONN_FAILED;
                         list_del(&mtask->running);
                         if (conn->login_mtask != mtask)
                                 __kfifo_put(session->mgmtpool.queue,
@@ -360,25 +385,17 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
                                 break;
                         }
 
-                        conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
-                        conn->tmfrsp_pdus_cnt++;
-                        if (conn->tmabort_state == TMABORT_INITIAL) {
-                                conn->tmabort_state =
-                                        ((struct iscsi_tm_rsp *)hdr)->
-                                        response == ISCSI_TMF_RSP_COMPLETE ?
-                                                TMABORT_SUCCESS:TMABORT_FAILED;
-                                /* unblock eh_abort() */
-                                wake_up(&conn->ehwait);
-                        }
+                        iscsi_tmf_rsp(conn, hdr);
                         break;
                 case ISCSI_OP_NOOP_IN:
-                        if (hdr->ttt != ISCSI_RESERVED_TAG) {
+                        if (hdr->ttt != ISCSI_RESERVED_TAG || datalen) {
                                 rc = ISCSI_ERR_PROTO;
                                 break;
                         }
                         conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
 
-                        rc = iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen);
+                        if (iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen))
+                                rc = ISCSI_ERR_CONN_FAILED;
                         list_del(&mtask->running);
                         if (conn->login_mtask != mtask)
                                 __kfifo_put(session->mgmtpool.queue,
@@ -391,14 +408,21 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
         } else if (itt == ISCSI_RESERVED_TAG) {
                 switch(opcode) {
                 case ISCSI_OP_NOOP_IN:
-                        if (!datalen) {
-                                rc = iscsi_check_assign_cmdsn(session,
-                                                (struct iscsi_nopin*)hdr);
-                                if (!rc && hdr->ttt != ISCSI_RESERVED_TAG)
-                                        rc = iscsi_recv_pdu(conn->cls_conn,
-                                                        hdr, NULL, 0);
-                        } else
+                        if (datalen) {
                                 rc = ISCSI_ERR_PROTO;
+                                break;
+                        }
+
+                        rc = iscsi_check_assign_cmdsn(session,
+                                        (struct iscsi_nopin*)hdr);
+                        if (rc)
+                                break;
+
+                        if (hdr->ttt == ISCSI_RESERVED_TAG)
+                                break;
+
+                        if (iscsi_recv_pdu(conn->cls_conn, hdr, NULL, 0))
+                                rc = ISCSI_ERR_CONN_FAILED;
                         break;
                 case ISCSI_OP_REJECT:
                         /* we need sth like iscsi_reject_rsp()*/
@@ -568,20 +592,24 @@ static int iscsi_data_xmit(struct iscsi_conn *conn)
         }
 
         /* process command queue */
-        while (__kfifo_get(conn->xmitqueue, (void*)&conn->ctask,
-                           sizeof(void*))) {
+        spin_lock_bh(&conn->session->lock);
+        while (!list_empty(&conn->xmitqueue)) {
                 /*
                  * iscsi tcp may readd the task to the xmitqueue to send
                  * write data
                  */
-                spin_lock_bh(&conn->session->lock);
-                if (list_empty(&conn->ctask->running))
-                        list_add_tail(&conn->ctask->running, &conn->run_list);
+                conn->ctask = list_entry(conn->xmitqueue.next,
+                                         struct iscsi_cmd_task, running);
+                conn->ctask->state = ISCSI_TASK_RUNNING;
+                list_move_tail(conn->xmitqueue.next, &conn->run_list);
                 spin_unlock_bh(&conn->session->lock);
+
                 rc = tt->xmit_cmd_task(conn, conn->ctask);
                 if (rc)
                         goto again;
+                spin_lock_bh(&conn->session->lock);
         }
+        spin_unlock_bh(&conn->session->lock);
         /* done with this ctask */
         conn->ctask = NULL;
 
@@ -691,6 +719,7 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
         sc->SCp.phase = session->age;
         sc->SCp.ptr = (char *)ctask;
 
+        ctask->state = ISCSI_TASK_PENDING;
         ctask->mtask = NULL;
         ctask->conn = conn;
         ctask->sc = sc;
@@ -700,7 +729,7 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
 
         session->tt->init_cmd_task(ctask);
 
-        __kfifo_put(conn->xmitqueue, (void*)&ctask, sizeof(void*));
+        list_add_tail(&ctask->running, &conn->xmitqueue);
         debug_scsi(
                "ctask enq [%s cid %d sc %lx itt 0x%x len %d cmdsn %d win %d]\n",
                 sc->sc_data_direction == DMA_TO_DEVICE ? "write" : "read",
@@ -977,31 +1006,27 @@ static int iscsi_exec_abort_task(struct scsi_cmnd *sc,
 /*
  * xmit mutex and session lock must be held
  */
-#define iscsi_remove_task(tasktype)                     \
-static struct iscsi_##tasktype *                        \
-iscsi_remove_##tasktype(struct kfifo *fifo, uint32_t itt)\
-{                                                       \
-        int i, nr_tasks = __kfifo_len(fifo) / sizeof(void*); \
-        struct iscsi_##tasktype *task;                  \
-                                                        \
-        debug_scsi("searching %d tasks\n", nr_tasks);   \
-                                                        \
-        for (i = 0; i < nr_tasks; i++) {                \
-                __kfifo_get(fifo, (void*)&task, sizeof(void*)); \
-                debug_scsi("check task %u\n", task->itt); \
-                                                        \
-                if (task->itt == itt) {                 \
-                        debug_scsi("matched task\n");   \
-                        return task;                    \
-                }                                       \
-                                                        \
-                __kfifo_put(fifo, (void*)&task, sizeof(void*)); \
-        }                                               \
-        return NULL;                                    \
-}
+static struct iscsi_mgmt_task *
+iscsi_remove_mgmt_task(struct kfifo *fifo, uint32_t itt)
+{
+        int i, nr_tasks = __kfifo_len(fifo) / sizeof(void*);
+        struct iscsi_mgmt_task *task;
 
-iscsi_remove_task(mgmt_task);
-iscsi_remove_task(cmd_task);
+        debug_scsi("searching %d tasks\n", nr_tasks);
+
+        for (i = 0; i < nr_tasks; i++) {
+                __kfifo_get(fifo, (void*)&task, sizeof(void*));
+                debug_scsi("check task %u\n", task->itt);
+
+                if (task->itt == itt) {
+                        debug_scsi("matched task\n");
+                        return task;
+                }
+
+                __kfifo_put(fifo, (void*)&task, sizeof(void*));
+        }
+        return NULL;
+}
 
 static int iscsi_ctask_mtask_cleanup(struct iscsi_cmd_task *ctask)
 {
@@ -1027,12 +1052,13 @@ static void fail_command(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
 {
         struct scsi_cmnd *sc;
 
-        conn->session->tt->cleanup_cmd_task(conn, ctask);
-        iscsi_ctask_mtask_cleanup(ctask);
-
         sc = ctask->sc;
         if (!sc)
                 return;
+
+        conn->session->tt->cleanup_cmd_task(conn, ctask);
+        iscsi_ctask_mtask_cleanup(ctask);
+
         sc->result = err;
         sc->resid = sc->request_bufflen;
         iscsi_complete_command(conn->session, ctask);
@@ -1043,7 +1069,6 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
         struct iscsi_cmd_task *ctask = (struct iscsi_cmd_task *)sc->SCp.ptr;
         struct iscsi_conn *conn = ctask->conn;
         struct iscsi_session *session = conn->session;
-        struct iscsi_cmd_task *pending_ctask;
         int rc;
 
         conn->eh_abort_cnt++;
@@ -1061,8 +1086,11 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
                 goto failed;
 
         /* ctask completed before time out */
-        if (!ctask->sc)
-                goto success;
+        if (!ctask->sc) {
+                spin_unlock_bh(&session->lock);
+                debug_scsi("sc completed while abort in progress\n");
+                goto success_rel_mutex;
+        }
 
         /* what should we do here ? */
         if (conn->ctask == ctask) {
@@ -1071,17 +1099,8 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
                 goto failed;
         }
 
-        /* check for the easy pending cmd abort */
-        pending_ctask = iscsi_remove_cmd_task(conn->xmitqueue, ctask->itt);
-        if (pending_ctask) {
-                /* iscsi_tcp queues write transfers on the xmitqueue */
-                if (list_empty(&pending_ctask->running)) {
-                        debug_scsi("found pending task\n");
-                        goto success;
-                } else
-                        __kfifo_put(conn->xmitqueue, (void*)&pending_ctask,
-                                    sizeof(void*));
-        }
+        if (ctask->state == ISCSI_TASK_PENDING)
+                goto success_cleanup;
 
         conn->tmabort_state = TMABORT_INITIAL;
 
@@ -1089,25 +1108,31 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
         rc = iscsi_exec_abort_task(sc, ctask);
         spin_lock_bh(&session->lock);
 
-        iscsi_ctask_mtask_cleanup(ctask);
         if (rc || sc->SCp.phase != session->age ||
             session->state != ISCSI_STATE_LOGGED_IN)
                 goto failed;
+        iscsi_ctask_mtask_cleanup(ctask);
 
-        /* ctask completed before tmf abort response */
-        if (!ctask->sc) {
-                debug_scsi("sc completed while abort in progress\n");
-                goto success;
-        }
-
-        if (conn->tmabort_state != TMABORT_SUCCESS) {
+        switch (conn->tmabort_state) {
+        case TMABORT_SUCCESS:
+                goto success_cleanup;
+        case TMABORT_NOT_FOUND:
+                if (!ctask->sc) {
+                        /* ctask completed before tmf abort response */
+                        spin_unlock_bh(&session->lock);
+                        debug_scsi("sc completed while abort in progress\n");
+                        goto success_rel_mutex;
+                }
+                /* fall through */
+        default:
+                /* timedout or failed */
                 spin_unlock_bh(&session->lock);
                 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
                 spin_lock_bh(&session->lock);
                 goto failed;
         }
 
-success:
+success_cleanup:
         debug_scsi("abort success [sc %lx itt 0x%x]\n", (long)sc, ctask->itt);
         spin_unlock_bh(&session->lock);
 
@@ -1121,6 +1146,7 @@ success:
         spin_unlock(&session->lock);
         write_unlock_bh(conn->recv_lock);
 
+success_rel_mutex:
         mutex_unlock(&conn->xmitmutex);
         return SUCCESS;
 
@@ -1263,6 +1289,7 @@ iscsi_session_setup(struct iscsi_transport *iscsit,
                 if (cmd_task_size)
                         ctask->dd_data = &ctask[1];
                 ctask->itt = cmd_i;
+                INIT_LIST_HEAD(&ctask->running);
         }
 
         spin_lock_init(&session->lock);
@@ -1282,6 +1309,7 @@ iscsi_session_setup(struct iscsi_transport *iscsit,
                 if (mgmt_task_size)
                         mtask->dd_data = &mtask[1];
                 mtask->itt = ISCSI_MGMT_ITT_OFFSET + cmd_i;
+                INIT_LIST_HEAD(&mtask->running);
         }
 
         if (scsi_add_host(shost, NULL))
@@ -1322,15 +1350,18 @@ void iscsi_session_teardown(struct iscsi_cls_session *cls_session)
 {
         struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
         struct iscsi_session *session = iscsi_hostdata(shost->hostdata);
+        struct module *owner = cls_session->transport->owner;
 
         scsi_remove_host(shost);
 
         iscsi_pool_free(&session->mgmtpool, (void**)session->mgmt_cmds);
         iscsi_pool_free(&session->cmdpool, (void**)session->cmds);
 
+        kfree(session->targetname);
+
         iscsi_destroy_session(cls_session);
         scsi_host_put(shost);
-        module_put(cls_session->transport->owner);
+        module_put(owner);
 }
 EXPORT_SYMBOL_GPL(iscsi_session_teardown);
 
@@ -1361,12 +1392,7 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
         conn->tmabort_state = TMABORT_INITIAL;
         INIT_LIST_HEAD(&conn->run_list);
         INIT_LIST_HEAD(&conn->mgmt_run_list);
-
-        /* initialize general xmit PDU commands queue */
-        conn->xmitqueue = kfifo_alloc(session->cmds_max * sizeof(void*),
-                                      GFP_KERNEL, NULL);
-        if (conn->xmitqueue == ERR_PTR(-ENOMEM))
-                goto xmitqueue_alloc_fail;
+        INIT_LIST_HEAD(&conn->xmitqueue);
 
         /* initialize general immediate & non-immediate PDU commands queue */
         conn->immqueue = kfifo_alloc(session->mgmtpool_max * sizeof(void*),
@@ -1394,7 +1420,7 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
         data = kmalloc(DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH, GFP_KERNEL);
         if (!data)
                 goto login_mtask_data_alloc_fail;
-        conn->login_mtask->data = data;
+        conn->login_mtask->data = conn->data = data;
 
         init_timer(&conn->tmabort_timer);
         mutex_init(&conn->xmitmutex);
@@ -1410,8 +1436,6 @@ login_mtask_alloc_fail:
 mgmtqueue_alloc_fail:
         kfifo_free(conn->immqueue);
 immqueue_alloc_fail:
-        kfifo_free(conn->xmitqueue);
-xmitqueue_alloc_fail:
         iscsi_destroy_conn(cls_conn);
         return NULL;
 }
@@ -1432,12 +1456,6 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
 
         set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
         mutex_lock(&conn->xmitmutex);
-        if (conn->c_stage == ISCSI_CONN_INITIAL_STAGE) {
-                if (session->tt->suspend_conn_recv)
-                        session->tt->suspend_conn_recv(conn);
-
-                session->tt->terminate_conn(conn);
-        }
 
         spin_lock_bh(&session->lock);
         conn->c_stage = ISCSI_CONN_CLEANUP_WAIT;
@@ -1474,7 +1492,8 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
         }
 
         spin_lock_bh(&session->lock);
-        kfree(conn->login_mtask->data);
+        kfree(conn->data);
+        kfree(conn->persistent_address);
         __kfifo_put(session->mgmtpool.queue, (void*)&conn->login_mtask,
                     sizeof(void*));
         list_del(&conn->item);
@@ -1489,7 +1508,6 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
         session->cmdsn = session->max_cmdsn = session->exp_cmdsn = 1;
         spin_unlock_bh(&session->lock);
 
-        kfifo_free(conn->xmitqueue);
         kfifo_free(conn->immqueue);
         kfifo_free(conn->mgmtqueue);
 
@@ -1572,7 +1590,7 @@ static void fail_all_commands(struct iscsi_conn *conn)
         struct iscsi_cmd_task *ctask, *tmp;
 
         /* flush pending */
-        while (__kfifo_get(conn->xmitqueue, (void*)&ctask, sizeof(void*))) {
+        list_for_each_entry_safe(ctask, tmp, &conn->xmitqueue, running) {
                 debug_scsi("failing pending sc %p itt 0x%x\n", ctask->sc,
                            ctask->itt);
                 fail_command(conn, ctask, DID_BUS_BUSY << 16);
@@ -1615,8 +1633,9 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
         set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
         spin_unlock_bh(&session->lock);
 
-        if (session->tt->suspend_conn_recv)
-                session->tt->suspend_conn_recv(conn);
+        write_lock_bh(conn->recv_lock);
+        set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
+        write_unlock_bh(conn->recv_lock);
 
         mutex_lock(&conn->xmitmutex);
         /*
@@ -1635,7 +1654,6 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
                 }
         }
 
-        session->tt->terminate_conn(conn);
         /*
          * flush queues.
          */
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 5c68cdd8736..d384c16f4a8 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -222,7 +222,7 @@ lpfc_issue_lip(struct Scsi_Host *host)
         pmboxq->mb.mbxCommand = MBX_DOWN_LINK;
         pmboxq->mb.mbxOwner = OWN_HOST;
 
-        mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
+        mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO * 2);
 
         if ((mbxstatus == MBX_SUCCESS) && (pmboxq->mb.mbxStatus == 0)) {
                 memset((void *)pmboxq, 0, sizeof (LPFC_MBOXQ_t));
@@ -884,7 +884,7 @@ sysfs_mbox_write(struct kobject *kobj, char *buf, loff_t off, size_t count)
                     phba->sysfs_mbox.mbox == NULL ) {
                         sysfs_mbox_idle(phba);
                         spin_unlock_irq(host->host_lock);
-                        return -EINVAL;
+                        return -EAGAIN;
                 }
         }
 
@@ -1000,14 +1000,15 @@ sysfs_mbox_read(struct kobject *kobj, char *buf, loff_t off, size_t count)
                         spin_unlock_irq(phba->host->host_lock);
                         rc = lpfc_sli_issue_mbox_wait (phba,
                                                        phba->sysfs_mbox.mbox,
-                                                       phba->fc_ratov * 2);
+                                lpfc_mbox_tmo_val(phba,
+                                    phba->sysfs_mbox.mbox->mb.mbxCommand) * HZ);
                         spin_lock_irq(phba->host->host_lock);
                 }
 
                 if (rc != MBX_SUCCESS) {
                         sysfs_mbox_idle(phba);
                         spin_unlock_irq(host->host_lock);
-                        return -ENODEV;
+                        return (rc == MBX_TIMEOUT) ? -ETIME : -ENODEV;
                 }
                 phba->sysfs_mbox.state = SMBOX_READING;
         }
@@ -1016,7 +1017,7 @@ sysfs_mbox_read(struct kobject *kobj, char *buf, loff_t off, size_t count)
                 printk(KERN_WARNING "mbox_read: Bad State\n");
                 sysfs_mbox_idle(phba);
                 spin_unlock_irq(host->host_lock);
-                return -EINVAL;
+                return -EAGAIN;
         }
 
         memcpy(buf, (uint8_t *) & phba->sysfs_mbox.mbox->mb + off, count);
@@ -1210,8 +1211,10 @@ lpfc_get_stats(struct Scsi_Host *shost)
         struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata;
         struct lpfc_sli *psli = &phba->sli;
         struct fc_host_statistics *hs = &phba->link_stats;
+        struct lpfc_lnk_stat * lso = &psli->lnk_stat_offsets;
         LPFC_MBOXQ_t *pmboxq;
         MAILBOX_t *pmb;
+        unsigned long seconds;
         int rc = 0;
 
         pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
@@ -1272,22 +1275,103 @@ lpfc_get_stats(struct Scsi_Host *shost)
         hs->invalid_crc_count = pmb->un.varRdLnk.crcCnt;
         hs->error_frames = pmb->un.varRdLnk.crcCnt;
 
+        hs->link_failure_count -= lso->link_failure_count;
+        hs->loss_of_sync_count -= lso->loss_of_sync_count;
+        hs->loss_of_signal_count -= lso->loss_of_signal_count;
+        hs->prim_seq_protocol_err_count -= lso->prim_seq_protocol_err_count;
+        hs->invalid_tx_word_count -= lso->invalid_tx_word_count;
+        hs->invalid_crc_count -= lso->invalid_crc_count;
+        hs->error_frames -= lso->error_frames;
+
         if (phba->fc_topology == TOPOLOGY_LOOP) {
                 hs->lip_count = (phba->fc_eventTag >> 1);
+                hs->lip_count -= lso->link_events;
                 hs->nos_count = -1;
         } else {
                 hs->lip_count = -1;
                 hs->nos_count = (phba->fc_eventTag >> 1);
+                hs->nos_count -= lso->link_events;
         }
 
         hs->dumped_frames = -1;
 
-/* FIX ME */
-        /*hs->SecondsSinceLastReset = (jiffies - lpfc_loadtime) / HZ;*/
+        seconds = get_seconds();
+        if (seconds < psli->stats_start)
+                hs->seconds_since_last_reset = seconds +
+                                ((unsigned long)-1 - psli->stats_start);
+        else
+                hs->seconds_since_last_reset = seconds - psli->stats_start;
 
         return hs;
 }
 
+static void
+lpfc_reset_stats(struct Scsi_Host *shost)
+{
+        struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata;
+        struct lpfc_sli *psli = &phba->sli;
+        struct lpfc_lnk_stat * lso = &psli->lnk_stat_offsets;
+        LPFC_MBOXQ_t *pmboxq;
+        MAILBOX_t *pmb;
+        int rc = 0;
+
+        pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+        if (!pmboxq)
+                return;
+        memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
+
+        pmb = &pmboxq->mb;
+        pmb->mbxCommand = MBX_READ_STATUS;
+        pmb->mbxOwner = OWN_HOST;
+        pmb->un.varWords[0] = 0x1; /* reset request */
+        pmboxq->context1 = NULL;
+
+        if ((phba->fc_flag & FC_OFFLINE_MODE) ||
+            (!(psli->sli_flag & LPFC_SLI2_ACTIVE)))
+                rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
+        else
+                rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
+
+        if (rc != MBX_SUCCESS) {
+                if (rc == MBX_TIMEOUT)
+                        pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+                else
+                        mempool_free(pmboxq, phba->mbox_mem_pool);
+                return;
+        }
+
+        memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
+        pmb->mbxCommand = MBX_READ_LNK_STAT;
+        pmb->mbxOwner = OWN_HOST;
+        pmboxq->context1 = NULL;
+
+        if ((phba->fc_flag & FC_OFFLINE_MODE) ||
+            (!(psli->sli_flag & LPFC_SLI2_ACTIVE)))
+                rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
+        else
+                rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
+
+        if (rc != MBX_SUCCESS) {
+                if (rc == MBX_TIMEOUT)
+                        pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+                else
+                        mempool_free( pmboxq, phba->mbox_mem_pool);
+                return;
+        }
+
+        lso->link_failure_count = pmb->un.varRdLnk.linkFailureCnt;
+        lso->loss_of_sync_count = pmb->un.varRdLnk.lossSyncCnt;
+        lso->loss_of_signal_count = pmb->un.varRdLnk.lossSignalCnt;
+        lso->prim_seq_protocol_err_count = pmb->un.varRdLnk.primSeqErrCnt;
+        lso->invalid_tx_word_count = pmb->un.varRdLnk.invalidXmitWord;
+        lso->invalid_crc_count = pmb->un.varRdLnk.crcCnt;
+        lso->error_frames = pmb->un.varRdLnk.crcCnt;
+        lso->link_events = (phba->fc_eventTag >> 1);
+
+        psli->stats_start = get_seconds();
+
+        return;
+}
 
 /*
  * The LPFC driver treats linkdown handling as target loss events so there
@@ -1431,8 +1515,7 @@ struct fc_function_template lpfc_transport_functions = {
          */
 
         .get_fc_host_stats = lpfc_get_stats,
-
-        /* the LPFC driver doesn't support resetting stats yet */
+        .reset_fc_host_stats = lpfc_reset_stats,
 
         .dd_fcrport_size = sizeof(struct lpfc_rport_data),
         .show_rport_maxframe_size = 1,
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 517e9e4dd46..2a176467f71 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -127,6 +127,7 @@ void lpfc_config_port(struct lpfc_hba *, LPFC_MBOXQ_t *);
 void lpfc_kill_board(struct lpfc_hba *, LPFC_MBOXQ_t *);
 void lpfc_mbox_put(struct lpfc_hba *, LPFC_MBOXQ_t *);
 LPFC_MBOXQ_t *lpfc_mbox_get(struct lpfc_hba *);
+int lpfc_mbox_tmo_val(struct lpfc_hba *, int);
 
 int lpfc_mem_alloc(struct lpfc_hba *);
 void lpfc_mem_free(struct lpfc_hba *);
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index b65ee57af53..bbb7310210b 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -131,6 +131,7 @@ lpfc_ct_unsol_event(struct lpfc_hba * phba,
         }
 
 ct_unsol_event_exit_piocbq:
+        list_del(&head);
         if (pmbuf) {
                 list_for_each_entry_safe(matp, next_matp, &pmbuf->list, list) {
                         lpfc_mbuf_free(phba, matp->virt, matp->phys);
@@ -481,7 +482,7 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
                 if (CTrsp->CommandResponse.bits.CmdRsp ==
                     be16_to_cpu(SLI_CT_RESPONSE_FS_ACC)) {
                         lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
-                                        "%d:0239 NameServer Rsp "
+                                        "%d:0208 NameServer Rsp "
                                         "Data: x%x\n",
                                         phba->brd_no,
                                         phba->fc_flag);
@@ -588,13 +589,9 @@ lpfc_get_hba_sym_node_name(struct lpfc_hba * phba, uint8_t * symbp)
 
         lpfc_decode_firmware_rev(phba, fwrev, 0);
 
-        if (phba->Port[0]) {
-                sprintf(symbp, "Emulex %s Port %s FV%s DV%s", phba->ModelName,
-                        phba->Port, fwrev, lpfc_release_version);
-        } else {
-                sprintf(symbp, "Emulex %s FV%s DV%s", phba->ModelName,
-                        fwrev, lpfc_release_version);
-        }
+        sprintf(symbp, "Emulex %s FV%s DV%s", phba->ModelName,
+                fwrev, lpfc_release_version);
+        return;
 }
 
 /*
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index b89f6cb641e..3567de61316 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -1848,9 +1848,12 @@ static void
 lpfc_cmpl_els_acc(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
                   struct lpfc_iocbq * rspiocb)
 {
+        IOCB_t *irsp;
         struct lpfc_nodelist *ndlp;
         LPFC_MBOXQ_t *mbox = NULL;
 
+        irsp = &rspiocb->iocb;
+
         ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
         if (cmdiocb->context_un.mbox)
                 mbox = cmdiocb->context_un.mbox;
@@ -1893,9 +1896,15 @@ lpfc_cmpl_els_acc(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
                         mempool_free( mbox, phba->mbox_mem_pool);
                 } else {
                         mempool_free( mbox, phba->mbox_mem_pool);
-                        if (ndlp->nlp_flag & NLP_ACC_REGLOGIN) {
-                                lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
-                                ndlp = NULL;
+                        /* Do not call NO_LIST for lpfc_els_abort'ed ELS cmds */
+                        if (!((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
+                            ((irsp->un.ulpWord[4] == IOERR_SLI_ABORTED) ||
+                            (irsp->un.ulpWord[4] == IOERR_LINK_DOWN) ||
+                            (irsp->un.ulpWord[4] == IOERR_SLI_DOWN)))) {
+                                if (ndlp->nlp_flag & NLP_ACC_REGLOGIN) {
+                                        lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
+                                        ndlp = NULL;
+                                }
                         }
                 }
         }
@@ -2839,7 +2848,7 @@ lpfc_els_rsp_rps_acc(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
 
         /* Xmit ELS RPS ACC response tag <ulpIoTag> */
         lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
-                        "%d:0128 Xmit ELS RPS ACC response tag x%x "
+                        "%d:0118 Xmit ELS RPS ACC response tag x%x "
                         "Data: x%x x%x x%x x%x x%x\n",
                         phba->brd_no,
                         elsiocb->iocb.ulpIoTag,
@@ -2948,7 +2957,7 @@ lpfc_els_rsp_rpl_acc(struct lpfc_hba * phba, uint16_t cmdsize,
 
         /* Xmit ELS RPL ACC response tag <ulpIoTag> */
         lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
-                        "%d:0128 Xmit ELS RPL ACC response tag x%x "
+                        "%d:0120 Xmit ELS RPL ACC response tag x%x "
                         "Data: x%x x%x x%x x%x x%x\n",
                         phba->brd_no,
                         elsiocb->iocb.ulpIoTag,
@@ -3109,7 +3118,7 @@ lpfc_els_rcv_fan(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
         struct lpfc_nodelist *ndlp, *next_ndlp;
 
         /* FAN received */
-        lpfc_printf_log(phba, KERN_INFO, LOG_ELS, "%d:265 FAN received\n",
+        lpfc_printf_log(phba, KERN_INFO, LOG_ELS, "%d:0265 FAN received\n",
                         phba->brd_no);
 
         icmd = &cmdiocb->iocb;
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 4d6cf990c4f..b2f1552f184 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -1557,6 +1557,8 @@ lpfc_freenode(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
                         mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
                 }
         }
+
+        spin_lock_irq(phba->host->host_lock);
         list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
                 if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) &&
                     (ndlp == (struct lpfc_nodelist *) mb->context2)) {
@@ -1569,6 +1571,7 @@ lpfc_freenode(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
                         mempool_free(mb, phba->mbox_mem_pool);
                 }
         }
+        spin_unlock_irq(phba->host->host_lock);
 
         lpfc_els_abort(phba,ndlp,0);
         spin_lock_irq(phba->host->host_lock);
@@ -1782,7 +1785,7 @@ lpfc_findnode_did(struct lpfc_hba * phba, uint32_t order, uint32_t did)
                         /* LOG change to REGLOGIN */
                         /* FIND node DID reglogin */
                         lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
-                                        "%d:0931 FIND node DID reglogin"
+                                        "%d:0901 FIND node DID reglogin"
                                         " Data: x%p x%x x%x x%x\n",
                                         phba->brd_no,
                                         ndlp, ndlp->nlp_DID,
@@ -1805,7 +1808,7 @@ lpfc_findnode_did(struct lpfc_hba * phba, uint32_t order, uint32_t did)
                         /* LOG change to PRLI */
                         /* FIND node DID prli */
                         lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
-                                        "%d:0931 FIND node DID prli "
+                                        "%d:0902 FIND node DID prli "
                                         "Data: x%p x%x x%x x%x\n",
                                         phba->brd_no,
                                         ndlp, ndlp->nlp_DID,
@@ -1828,7 +1831,7 @@ lpfc_findnode_did(struct lpfc_hba * phba, uint32_t order, uint32_t did)
                         /* LOG change to NPR */
                         /* FIND node DID npr */
                         lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
-                                        "%d:0931 FIND node DID npr "
+                                        "%d:0903 FIND node DID npr "
                                         "Data: x%p x%x x%x x%x\n",
                                         phba->brd_no,
                                         ndlp, ndlp->nlp_DID,
@@ -1851,7 +1854,7 @@ lpfc_findnode_did(struct lpfc_hba * phba, uint32_t order, uint32_t did)
                         /* LOG change to UNUSED */
                         /* FIND node DID unused */
                         lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
-                                        "%d:0931 FIND node DID unused "
+                                        "%d:0905 FIND node DID unused "
                                         "Data: x%p x%x x%x x%x\n",
                                         phba->brd_no,
                                         ndlp, ndlp->nlp_DID,
@@ -2335,7 +2338,7 @@ lpfc_disc_timeout_handler(struct lpfc_hba *phba)
         initlinkmbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
         if (!initlinkmbox) {
                 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
-                                "%d:0226 Device Discovery "
+                                "%d:0206 Device Discovery "
                                 "completion error\n",
                                 phba->brd_no);
                 phba->hba_state = LPFC_HBA_ERROR;
@@ -2365,7 +2368,7 @@ lpfc_disc_timeout_handler(struct lpfc_hba *phba)
                 if (!clearlambox) {
                         clrlaerr = 1;
                         lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
-                                        "%d:0226 Device Discovery "
+                                        "%d:0207 Device Discovery "
                                         "completion error\n",
                                         phba->brd_no);
                         phba->hba_state = LPFC_HBA_ERROR;
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index ef47b824cbe..f6948ffe689 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -1379,6 +1379,7 @@ lpfc_offline(struct lpfc_hba * phba)
         /* stop all timers associated with this hba */
         lpfc_stop_timer(phba);
         phba->work_hba_events = 0;
+        phba->work_ha = 0;
 
         lpfc_printf_log(phba,
                        KERN_WARNING,
@@ -1616,7 +1617,11 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
                 goto out_free_iocbq;
         }
 
-        /* We can rely on a queue depth attribute only after SLI HBA setup */
+        /*
+         * Set initial can_queue value since 0 is no longer supported and
+         * scsi_add_host will fail. This will be adjusted later based on the
+         * max xri value determined in hba setup.
+         */
         host->can_queue = phba->cfg_hba_queue_depth - 10;
 
         /* Tell the midlayer we support 16 byte commands */
@@ -1656,6 +1661,12 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
                 goto out_free_irq;
         }
 
+        /*
+         * hba setup may have changed the hba_queue_depth so we need to adjust
+         * the value of can_queue.
+         */
+        host->can_queue = phba->cfg_hba_queue_depth - 10;
+
         lpfc_discovery_wait(phba);
 
         if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index e42f22aaf71..4d016c2a1b2 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -651,3 +651,19 @@ lpfc_mbox_get(struct lpfc_hba * phba)
 
         return mbq;
 }
+
+int
+lpfc_mbox_tmo_val(struct lpfc_hba *phba, int cmd)
+{
+        switch (cmd) {
+        case MBX_WRITE_NV:      /* 0x03 */
+        case MBX_UPDATE_CFG:    /* 0x1B */
+        case MBX_DOWN_LOAD:     /* 0x1C */
+        case MBX_DEL_LD_ENTRY:  /* 0x1D */
+        case MBX_LOAD_AREA:     /* 0x81 */
+        case MBX_FLASH_WR_ULA:  /* 0x98 */
+        case MBX_LOAD_EXP_ROM:  /* 0x9C */
+                return LPFC_MBOX_TMO_FLASH_CMD;
+        }
+        return LPFC_MBOX_TMO;
+}
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index bd0b0e293d6..20449a8dd53 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -179,7 +179,7 @@ lpfc_els_abort(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
 
         /* Abort outstanding I/O on NPort <nlp_DID> */
         lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
-                        "%d:0201 Abort outstanding I/O on NPort x%x "
+                        "%d:0205 Abort outstanding I/O on NPort x%x "
                         "Data: x%x x%x x%x\n",
                         phba->brd_no, ndlp->nlp_DID, ndlp->nlp_flag,
                         ndlp->nlp_state, ndlp->nlp_rpi);
@@ -393,6 +393,20 @@ lpfc_rcv_plogi(struct lpfc_hba * phba,
                 mbox->context2 = ndlp;
                 ndlp->nlp_flag |= (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI);
 
+                /*
+                 * If there is an outstanding PLOGI issued, abort it before
+                 * sending ACC rsp for received PLOGI. If pending plogi
+                 * is not canceled here, the plogi will be rejected by
+                 * remote port and will be retried. On a configuration with
+                 * single discovery thread, this will cause a huge delay in
+                 * discovery. Also this will cause multiple state machines
+                 * running in parallel for this node.
+                 */
+                if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE) {
+                        /* software abort outstanding PLOGI */
+                        lpfc_els_abort(phba, ndlp, 1);
+                }
+
                 lpfc_els_rsp_acc(phba, ELS_CMD_PLOGI, cmdiocb, ndlp, mbox, 0);
                 return 1;
 
@@ -1601,7 +1615,13 @@ lpfc_rcv_padisc_npr_node(struct lpfc_hba * phba,
 
         lpfc_rcv_padisc(phba, ndlp, cmdiocb);
 
-        if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
+        /*
+         * Do not start discovery if discovery is about to start
+         * or discovery in progress for this node. Starting discovery
+         * here will affect the counting of discovery threads.
+         */
+        if ((!(ndlp->nlp_flag & NLP_DELAY_TMO)) &&
+            (ndlp->nlp_flag & NLP_NPR_2B_DISC)){
                 if (ndlp->nlp_flag & NLP_NPR_ADISC) {
                         ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
                         ndlp->nlp_state = NLP_STE_ADISC_ISSUE;
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index a760a44173d..a8816a8738f 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -21,6 +21,7 @@
 
 #include <linux/pci.h>
 #include <linux/interrupt.h>
+#include <linux/delay.h>
 
 #include <scsi/scsi.h>
 #include <scsi/scsi_device.h>
@@ -841,6 +842,21 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
         return 0;
 }
 
+static void
+lpfc_block_error_handler(struct scsi_cmnd *cmnd)
+{
+        struct Scsi_Host *shost = cmnd->device->host;
+        struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
+
+        spin_lock_irq(shost->host_lock);
+        while (rport->port_state == FC_PORTSTATE_BLOCKED) {
+                spin_unlock_irq(shost->host_lock);
+                msleep(1000);
+                spin_lock_irq(shost->host_lock);
+        }
+        spin_unlock_irq(shost->host_lock);
+        return;
+}
 
 static int
 lpfc_abort_handler(struct scsi_cmnd *cmnd)
@@ -855,6 +871,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
         unsigned int loop_count = 0;
         int ret = SUCCESS;
 
+        lpfc_block_error_handler(cmnd);
         spin_lock_irq(shost->host_lock);
 
         lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble;
@@ -957,6 +974,7 @@ lpfc_reset_lun_handler(struct scsi_cmnd *cmnd)
         int ret = FAILED;
         int cnt, loopcnt;
 
+        lpfc_block_error_handler(cmnd);
         spin_lock_irq(shost->host_lock);
         /*
          * If target is not in a MAPPED state, delay the reset until
@@ -1073,6 +1091,7 @@ lpfc_reset_bus_handler(struct scsi_cmnd *cmnd)
         int cnt, loopcnt;
         struct lpfc_scsi_buf * lpfc_cmd;
 
+        lpfc_block_error_handler(cmnd);
         spin_lock_irq(shost->host_lock);
 
         lpfc_cmd = lpfc_get_scsi_buf(phba);
@@ -1104,7 +1123,7 @@ lpfc_reset_bus_handler(struct scsi_cmnd *cmnd)
                                              ndlp->rport->dd_data);
                 if (ret != SUCCESS) {
                         lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
-                                "%d:0713 Bus Reset on target %d failed\n",
+                                "%d:0700 Bus Reset on target %d failed\n",
                                 phba->brd_no, i);
                         err_count++;
                 }
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 350a625fa22..70f4d5a1348 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -320,7 +320,8 @@ lpfc_sli_next_iotag(struct lpfc_hba * phba, struct lpfc_iocbq * iocbq)
                         kfree(old_arr);
                         return iotag;
                 }
-        }
+        } else
+                spin_unlock_irq(phba->host->host_lock);
 
         lpfc_printf_log(phba, KERN_ERR,LOG_SLI,
                         "%d:0318 Failed to allocate IOTAG.last IOTAG is %d\n",
@@ -969,9 +970,11 @@ void lpfc_sli_poll_fcp_ring(struct lpfc_hba * phba)
                  * resources need to be recovered.
                  */
                 if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
-                        printk(KERN_INFO "%s: IOCB cmd 0x%x processed."
-                               " Skipping completion\n", __FUNCTION__,
-                               irsp->ulpCommand);
+                        lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+                                        "%d:0314 IOCB cmd 0x%x"
+                                        " processed. Skipping"
+                                        " completion", phba->brd_no,
+                                        irsp->ulpCommand);
                         break;
                 }
 
@@ -1104,7 +1107,7 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba * phba,
                 if (unlikely(irsp->ulpStatus)) {
                         /* Rsp ring <ringno> error: IOCB */
                         lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
-                                "%d:0326 Rsp Ring %d error: IOCB Data: "
+                                "%d:0336 Rsp Ring %d error: IOCB Data: "
                                 "x%x x%x x%x x%x x%x x%x x%x x%x\n",
                                 phba->brd_no, pring->ringno,
                                 irsp->un.ulpWord[0], irsp->un.ulpWord[1],
@@ -1122,9 +1125,11 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba * phba,
                  * resources need to be recovered.
                  */
                 if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
-                        printk(KERN_INFO "%s: IOCB cmd 0x%x processed. "
-                               "Skipping completion\n", __FUNCTION__,
-                               irsp->ulpCommand);
+                        lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+                                        "%d:0333 IOCB cmd 0x%x"
+                                        " processed. Skipping"
+                                        " completion\n", phba->brd_no,
+                                        irsp->ulpCommand);
                         break;
                 }
 
@@ -1155,7 +1160,7 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba * phba,
                 } else {
                         /* Unknown IOCB command */
                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
-                                "%d:0321 Unknown IOCB command "
+                                "%d:0334 Unknown IOCB command "
                                 "Data: x%x, x%x x%x x%x x%x\n",
                                 phba->brd_no, type, irsp->ulpCommand,
                                 irsp->ulpStatus, irsp->ulpIoTag,
@@ -1238,7 +1243,7 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba * phba,
                 lpfc_printf_log(phba,
                                 KERN_ERR,
                                 LOG_SLI,
-                                "%d:0312 Ring %d handler: portRspPut %d "
+                                "%d:0303 Ring %d handler: portRspPut %d "
                                 "is bigger then rsp ring %d\n",
                                 phba->brd_no,
                                 pring->ringno, portRspPut, portRspMax);
@@ -1383,7 +1388,7 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba * phba,
                                 lpfc_printf_log(phba,
                                                 KERN_ERR,
                                                 LOG_SLI,
-                                                "%d:0321 Unknown IOCB command "
+                                                "%d:0335 Unknown IOCB command "
                                                 "Data: x%x x%x x%x x%x\n",
                                                 phba->brd_no,
                                                 irsp->ulpCommand,
@@ -1399,11 +1404,11 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba * phba,
                                                          next_iocb,
                                                          &saveq->list,
                                                          list) {
+                                        list_del(&rspiocbp->list);
                                         lpfc_sli_release_iocbq(phba,
                                                                 rspiocbp);
                                 }
                         }
-
                         lpfc_sli_release_iocbq(phba, saveq);
                 }
         }
@@ -1711,15 +1716,13 @@ lpfc_sli_brdreset(struct lpfc_hba * phba)
         phba->fc_myDID = 0;
         phba->fc_prevDID = 0;
 
-        psli->sli_flag = 0;
-
         /* Turn off parity checking and serr during the physical reset */
         pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
         pci_write_config_word(phba->pcidev, PCI_COMMAND,
                               (cfg_value &
                                ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
 
-        psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
+        psli->sli_flag &= ~(LPFC_SLI2_ACTIVE | LPFC_PROCESS_LA);
         /* Now toggle INITFF bit in the Host Control Register */
         writel(HC_INITFF, phba->HCregaddr);
         mdelay(1);
@@ -1760,7 +1763,7 @@ lpfc_sli_brdrestart(struct lpfc_hba * phba)
 
         /* Restart HBA */
         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
-                        "%d:0328 Restart HBA Data: x%x x%x\n", phba->brd_no,
+                        "%d:0337 Restart HBA Data: x%x x%x\n", phba->brd_no,
                         phba->hba_state, psli->sli_flag);
 
         word0 = 0;
@@ -1792,6 +1795,9 @@ lpfc_sli_brdrestart(struct lpfc_hba * phba)
 
         spin_unlock_irq(phba->host->host_lock);
 
+        memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
+        psli->stats_start = get_seconds();
+
         if (skip_post)
                 mdelay(100);
         else
@@ -1902,6 +1908,9 @@ lpfc_sli_hba_setup(struct lpfc_hba * phba)
         }
 
         while (resetcount < 2 && !done) {
+                spin_lock_irq(phba->host->host_lock);
+                phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
+                spin_unlock_irq(phba->host->host_lock);
                 phba->hba_state = LPFC_STATE_UNKNOWN;
                 lpfc_sli_brdrestart(phba);
                 msleep(2500);
@@ -1909,6 +1918,9 @@ lpfc_sli_hba_setup(struct lpfc_hba * phba)
                 if (rc)
                         break;
 
+                spin_lock_irq(phba->host->host_lock);
+                phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
+                spin_unlock_irq(phba->host->host_lock);
                 resetcount++;
 
         /* Call pre CONFIG_PORT mailbox command initialization. A value of 0
@@ -2194,7 +2206,8 @@ lpfc_sli_issue_mbox(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmbox, uint32_t flag)
                         return (MBX_NOT_FINISHED);
                 }
                 /* timeout active mbox command */
-                mod_timer(&psli->mbox_tmo, jiffies + HZ * LPFC_MBOX_TMO);
+                mod_timer(&psli->mbox_tmo, (jiffies +
+                          (HZ * lpfc_mbox_tmo_val(phba, mb->mbxCommand))));
         }
 
         /* Mailbox cmd <cmd> issue */
@@ -2254,7 +2267,6 @@ lpfc_sli_issue_mbox(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmbox, uint32_t flag)
                 break;
 
         case MBX_POLL:
-                i = 0;
                 psli->mbox_active = NULL;
                 if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
                         /* First read mbox status word */
@@ -2268,11 +2280,14 @@ lpfc_sli_issue_mbox(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmbox, uint32_t flag)
                 /* Read the HBA Host Attention Register */
                 ha_copy = readl(phba->HAregaddr);
 
+                i = lpfc_mbox_tmo_val(phba, mb->mbxCommand);
+                i *= 1000; /* Convert to ms */
+
                 /* Wait for command to complete */
                 while (((word0 & OWN_CHIP) == OWN_CHIP) ||
                        (!(ha_copy & HA_MBATT) &&
                         (phba->hba_state > LPFC_WARM_START))) {
-                        if (i++ >= 100) {
+                        if (i-- <= 0) {
                                 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
                                 spin_unlock_irqrestore(phba->host->host_lock,
                                                        drvr_flag);
@@ -2290,7 +2305,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmbox, uint32_t flag)
 
                         /* Can be in interrupt context, do not sleep */
                         /* (or might be called with interrupts disabled) */
-                        mdelay(i);
+                        mdelay(1);
 
                         spin_lock_irqsave(phba->host->host_lock, drvr_flag);
 
@@ -3005,7 +3020,7 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba * phba,
 
         if (timeleft == 0) {
                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
-                                "%d:0329 IOCB wait timeout error - no "
+                                "%d:0338 IOCB wait timeout error - no "
                                 "wake response Data x%x\n",
                                 phba->brd_no, timeout);
                 retval = IOCB_TIMEDOUT;
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index d8ef0d2894d..e26de680935 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -172,6 +172,18 @@ struct lpfc_sli_stat {
         uint32_t mbox_busy;     /* Mailbox cmd busy */
 };
 
+/* Structure to store link status values when port stats are reset */
+struct lpfc_lnk_stat {
+        uint32_t link_failure_count;
+        uint32_t loss_of_sync_count;
+        uint32_t loss_of_signal_count;
+        uint32_t prim_seq_protocol_err_count;
+        uint32_t invalid_tx_word_count;
+        uint32_t invalid_crc_count;
+        uint32_t error_frames;
+        uint32_t link_events;
+};
+
 /* Structure used to hold SLI information */
 struct lpfc_sli {
         uint32_t num_rings;
@@ -201,6 +213,8 @@ struct lpfc_sli {
201 struct lpfc_iocbq ** iocbq_lookup; /* array to lookup IOCB by IOTAG */ 213 struct lpfc_iocbq ** iocbq_lookup; /* array to lookup IOCB by IOTAG */
202 size_t iocbq_lookup_len; /* current lengs of the array */ 214 size_t iocbq_lookup_len; /* current lengs of the array */
203 uint16_t last_iotag; /* last allocated IOTAG */ 215 uint16_t last_iotag; /* last allocated IOTAG */
216 unsigned long stats_start; /* in seconds */
217 struct lpfc_lnk_stat lnk_stat_offsets;
204}; 218};
205 219
206/* Given a pointer to the start of the ring, and the slot number of 220/* Given a pointer to the start of the ring, and the slot number of
@@ -211,3 +225,9 @@ struct lpfc_sli {
211 225
212#define LPFC_MBOX_TMO 30 /* Sec tmo for outstanding mbox 226#define LPFC_MBOX_TMO 30 /* Sec tmo for outstanding mbox
213 command */ 227 command */
228#define LPFC_MBOX_TMO_FLASH_CMD 300 /* Sec tmo for outstanding FLASH write
229 * or erase cmds. This is especially
230 * long because of the potential of
231 * multiple flash erases that can be
232 * spawned.
233 */
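
The new LPFC_MBOX_TMO_FLASH_CMD constant is consumed through lpfc_mbox_tmo_val(), which this diff does not show. A plausible shape for that helper, offered purely as an assumption for illustration (the opcode names and values below are hypothetical, not the driver's), is a per-opcode switch that returns the 300 s flash timeout for download/erase commands and the 30 s default otherwise.

#include <stdio.h>

#define LPFC_MBOX_TMO		30	/* default mailbox timeout, seconds */
#define LPFC_MBOX_TMO_FLASH_CMD	300	/* flash write/erase timeout, seconds */

/* hypothetical opcode values, for illustration only */
#define MBX_FLASH_WRITE	0x01
#define MBX_READ_CONFIG	0x02

static int mbox_tmo_val(int mbx_command)
{
	switch (mbx_command) {
	case MBX_FLASH_WRITE:			/* flash download/erase class */
		return LPFC_MBOX_TMO_FLASH_CMD;
	default:
		return LPFC_MBOX_TMO;
	}
}

int main(void)
{
	printf("flash cmd timeout: %d s\n", mbox_tmo_val(MBX_FLASH_WRITE));
	printf("other cmd timeout: %d s\n", mbox_tmo_val(MBX_READ_CONFIG));
	return 0;
}
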
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 10e89c6ae82..c7091ea29f3 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
18 * included with this package. * 18 * included with this package. *
19 *******************************************************************/ 19 *******************************************************************/
20 20
21#define LPFC_DRIVER_VERSION "8.1.7" 21#define LPFC_DRIVER_VERSION "8.1.9"
22 22
23#define LPFC_DRIVER_NAME "lpfc" 23#define LPFC_DRIVER_NAME "lpfc"
24 24
diff --git a/drivers/scsi/megaraid/mega_common.h b/drivers/scsi/megaraid/mega_common.h
index 4675343228a..8cd0bd1d0f7 100644
--- a/drivers/scsi/megaraid/mega_common.h
+++ b/drivers/scsi/megaraid/mega_common.h
@@ -37,6 +37,12 @@
37#define LSI_MAX_CHANNELS 16 37#define LSI_MAX_CHANNELS 16
38#define LSI_MAX_LOGICAL_DRIVES_64LD (64+1) 38#define LSI_MAX_LOGICAL_DRIVES_64LD (64+1)
39 39
40#define HBA_SIGNATURE_64_BIT 0x299
41#define PCI_CONF_AMISIG64 0xa4
42
43#define MEGA_SCSI_INQ_EVPD 1
44#define MEGA_INVALID_FIELD_IN_CDB 0x24
45
40 46
41/** 47/**
42 * scb_t - scsi command control block 48 * scb_t - scsi command control block
diff --git a/drivers/scsi/megaraid/megaraid_ioctl.h b/drivers/scsi/megaraid/megaraid_ioctl.h
index bdaee144a1c..b8aa34202ec 100644
--- a/drivers/scsi/megaraid/megaraid_ioctl.h
+++ b/drivers/scsi/megaraid/megaraid_ioctl.h
@@ -132,6 +132,10 @@ typedef struct uioc {
132/* Driver Data: */ 132/* Driver Data: */
133 void __user * user_data; 133 void __user * user_data;
134 uint32_t user_data_len; 134 uint32_t user_data_len;
135
136 /* 64bit alignment */
137 uint32_t pad_for_64bit_align;
138
135 mraid_passthru_t __user *user_pthru; 139 mraid_passthru_t __user *user_pthru;
136 140
137 mraid_passthru_t *pthru32; 141 mraid_passthru_t *pthru32;
diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
index 92715130ac0..cd982c877da 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.c
+++ b/drivers/scsi/megaraid/megaraid_mbox.c
@@ -10,7 +10,7 @@
10 * 2 of the License, or (at your option) any later version. 10 * 2 of the License, or (at your option) any later version.
11 * 11 *
12 * FILE : megaraid_mbox.c 12 * FILE : megaraid_mbox.c
13 * Version : v2.20.4.8 (Apr 11 2006) 13 * Version : v2.20.4.9 (Jul 16 2006)
14 * 14 *
15 * Authors: 15 * Authors:
16 * Atul Mukker <Atul.Mukker@lsil.com> 16 * Atul Mukker <Atul.Mukker@lsil.com>
@@ -720,6 +720,7 @@ megaraid_init_mbox(adapter_t *adapter)
720 struct pci_dev *pdev; 720 struct pci_dev *pdev;
721 mraid_device_t *raid_dev; 721 mraid_device_t *raid_dev;
722 int i; 722 int i;
723 uint32_t magic64;
723 724
724 725
725 adapter->ito = MBOX_TIMEOUT; 726 adapter->ito = MBOX_TIMEOUT;
@@ -863,12 +864,33 @@ megaraid_init_mbox(adapter_t *adapter)
863 864
864 // Set the DMA mask to 64-bit. All supported controllers as capable of 865 // Set the DMA mask to 64-bit. All supported controllers as capable of
865 // DMA in this range 866 // DMA in this range
866 if (pci_set_dma_mask(adapter->pdev, DMA_64BIT_MASK) != 0) { 867 pci_read_config_dword(adapter->pdev, PCI_CONF_AMISIG64, &magic64);
867 868
868 con_log(CL_ANN, (KERN_WARNING 869 if (((magic64 == HBA_SIGNATURE_64_BIT) &&
869 "megaraid: could not set DMA mask for 64-bit.\n")); 870 ((adapter->pdev->subsystem_device !=
871 PCI_SUBSYS_ID_MEGARAID_SATA_150_6) ||
872 (adapter->pdev->subsystem_device !=
873 PCI_SUBSYS_ID_MEGARAID_SATA_150_4))) ||
874 (adapter->pdev->vendor == PCI_VENDOR_ID_LSI_LOGIC &&
875 adapter->pdev->device == PCI_DEVICE_ID_VERDE) ||
876 (adapter->pdev->vendor == PCI_VENDOR_ID_LSI_LOGIC &&
877 adapter->pdev->device == PCI_DEVICE_ID_DOBSON) ||
878 (adapter->pdev->vendor == PCI_VENDOR_ID_LSI_LOGIC &&
879 adapter->pdev->device == PCI_DEVICE_ID_LINDSAY) ||
880 (adapter->pdev->vendor == PCI_VENDOR_ID_DELL &&
881 adapter->pdev->device == PCI_DEVICE_ID_PERC4_DI_EVERGLADES) ||
882 (adapter->pdev->vendor == PCI_VENDOR_ID_DELL &&
883 adapter->pdev->device == PCI_DEVICE_ID_PERC4E_DI_KOBUK)) {
884 if (pci_set_dma_mask(adapter->pdev, DMA_64BIT_MASK)) {
885 con_log(CL_ANN, (KERN_WARNING
886 "megaraid: DMA mask for 64-bit failed\n"));
870 887
871 goto out_free_sysfs_res; 888 if (pci_set_dma_mask (adapter->pdev, DMA_32BIT_MASK)) {
889 con_log(CL_ANN, (KERN_WARNING
890 "megaraid: 32-bit DMA mask failed\n"));
891 goto out_free_sysfs_res;
892 }
893 }
872 } 894 }
873 895
874 // setup tasklet for DPC 896 // setup tasklet for DPC
@@ -1622,6 +1644,14 @@ megaraid_mbox_build_cmd(adapter_t *adapter, struct scsi_cmnd *scp, int *busy)
1622 rdev->last_disp |= (1L << SCP2CHANNEL(scp)); 1644 rdev->last_disp |= (1L << SCP2CHANNEL(scp));
1623 } 1645 }
1624 1646
1647 if (scp->cmnd[1] & MEGA_SCSI_INQ_EVPD) {
1648 scp->sense_buffer[0] = 0x70;
1649 scp->sense_buffer[2] = ILLEGAL_REQUEST;
1650 scp->sense_buffer[12] = MEGA_INVALID_FIELD_IN_CDB;
1651 scp->result = CHECK_CONDITION << 1;
1652 return NULL;
1653 }
1654
1625 /* Fall through */ 1655 /* Fall through */
1626 1656
1627 case READ_CAPACITY: 1657 case READ_CAPACITY:
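
In the megaraid_mbox.c hunk above, the 64-bit DMA mask is no longer applied unconditionally: the driver first reads the AMISIG64 signature from PCI config space and checks the device against a list of known 64-bit-capable controllers, and if the 64-bit mask cannot be set it falls back to a 32-bit mask before giving up. The same hunk also rejects INQUIRY commands with the EVPD bit set by filling in ILLEGAL REQUEST / INVALID FIELD IN CDB sense data. A small self-contained C sketch of the mask-selection policy follows; set_dma_mask() and the whitelist flag are stand-ins for the PCI helpers and device-ID checks.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define HBA_SIGNATURE_64_BIT 0x299

static bool set_dma_mask(int bits)
{
	/* pretend the platform can only satisfy the 32-bit mask */
	return bits == 32;
}

static int pick_dma_mask(uint32_t magic64, bool known_64bit_device)
{
	if (magic64 == HBA_SIGNATURE_64_BIT || known_64bit_device) {
		if (set_dma_mask(64))
			return 64;
		fprintf(stderr, "64-bit DMA mask failed, falling back\n");
	}
	if (set_dma_mask(32))
		return 32;
	return -1;				/* neither mask worked */
}

int main(void)
{
	printf("selected DMA mask: %d bits\n",
	       pick_dma_mask(HBA_SIGNATURE_64_BIT, false));
	return 0;
}
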
diff --git a/drivers/scsi/megaraid/megaraid_mbox.h b/drivers/scsi/megaraid/megaraid_mbox.h
index 868fb0ec93e..2b5a3285f79 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.h
+++ b/drivers/scsi/megaraid/megaraid_mbox.h
@@ -21,8 +21,8 @@
21#include "megaraid_ioctl.h" 21#include "megaraid_ioctl.h"
22 22
23 23
24#define MEGARAID_VERSION "2.20.4.8" 24#define MEGARAID_VERSION "2.20.4.9"
25#define MEGARAID_EXT_VERSION "(Release Date: Mon Apr 11 12:27:22 EST 2006)" 25#define MEGARAID_EXT_VERSION "(Release Date: Sun Jul 16 12:27:22 EST 2006)"
26 26
27 27
28/* 28/*
diff --git a/drivers/scsi/megaraid/megaraid_mm.c b/drivers/scsi/megaraid/megaraid_mm.c
index e8f534fb336..d85b9a8f1b8 100644
--- a/drivers/scsi/megaraid/megaraid_mm.c
+++ b/drivers/scsi/megaraid/megaraid_mm.c
@@ -10,7 +10,7 @@
10 * 2 of the License, or (at your option) any later version. 10 * 2 of the License, or (at your option) any later version.
11 * 11 *
12 * FILE : megaraid_mm.c 12 * FILE : megaraid_mm.c
13 * Version : v2.20.2.6 (Mar 7 2005) 13 * Version : v2.20.2.7 (Jul 16 2006)
14 * 14 *
15 * Common management module 15 * Common management module
16 */ 16 */
diff --git a/drivers/scsi/megaraid/megaraid_mm.h b/drivers/scsi/megaraid/megaraid_mm.h
index 3d9e67d6849..c8762b2b8ed 100644
--- a/drivers/scsi/megaraid/megaraid_mm.h
+++ b/drivers/scsi/megaraid/megaraid_mm.h
@@ -27,9 +27,9 @@
27#include "megaraid_ioctl.h" 27#include "megaraid_ioctl.h"
28 28
29 29
30#define LSI_COMMON_MOD_VERSION "2.20.2.6" 30#define LSI_COMMON_MOD_VERSION "2.20.2.7"
31#define LSI_COMMON_MOD_EXT_VERSION \ 31#define LSI_COMMON_MOD_EXT_VERSION \
32 "(Release Date: Mon Mar 7 00:01:03 EST 2005)" 32 "(Release Date: Sun Jul 16 00:01:03 EST 2006)"
33 33
34 34
35#define LSI_DBGLVL dbglevel 35#define LSI_DBGLVL dbglevel
diff --git a/drivers/scsi/pdc_adma.c b/drivers/scsi/pdc_adma.c
index d1f38c32aa1..efc8fff1d25 100644
--- a/drivers/scsi/pdc_adma.c
+++ b/drivers/scsi/pdc_adma.c
@@ -183,7 +183,8 @@ static struct ata_port_info adma_port_info[] = {
183 { 183 {
184 .sht = &adma_ata_sht, 184 .sht = &adma_ata_sht,
185 .host_flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST | 185 .host_flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST |
186 ATA_FLAG_NO_LEGACY | ATA_FLAG_MMIO, 186 ATA_FLAG_NO_LEGACY | ATA_FLAG_MMIO |
187 ATA_FLAG_PIO_POLLING,
187 .pio_mask = 0x10, /* pio4 */ 188 .pio_mask = 0x10, /* pio4 */
188 .udma_mask = 0x1f, /* udma0-4 */ 189 .udma_mask = 0x1f, /* udma0-4 */
189 .port_ops = &adma_ata_ops, 190 .port_ops = &adma_ata_ops,
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 139ea0e27fd..0930260aec2 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -487,6 +487,7 @@ typedef struct {
487#define MBA_IP_RCV_BUFFER_EMPTY 0x8026 /* IP receive buffer queue empty. */ 487#define MBA_IP_RCV_BUFFER_EMPTY 0x8026 /* IP receive buffer queue empty. */
488#define MBA_IP_HDR_DATA_SPLIT 0x8027 /* IP header/data splitting feature */ 488#define MBA_IP_HDR_DATA_SPLIT 0x8027 /* IP header/data splitting feature */
489 /* used. */ 489 /* used. */
490#define MBA_TRACE_NOTIFICATION 0x8028 /* Trace/Diagnostic notification. */
490#define MBA_POINT_TO_POINT 0x8030 /* Point to point mode. */ 491#define MBA_POINT_TO_POINT 0x8030 /* Point to point mode. */
491#define MBA_CMPLT_1_16BIT 0x8031 /* Completion 1 16bit IOSB. */ 492#define MBA_CMPLT_1_16BIT 0x8031 /* Completion 1 16bit IOSB. */
492#define MBA_CMPLT_2_16BIT 0x8032 /* Completion 2 16bit IOSB. */ 493#define MBA_CMPLT_2_16BIT 0x8032 /* Completion 2 16bit IOSB. */
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 9758dba9554..859649160ca 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -3063,6 +3063,7 @@ qla2x00_update_fcports(scsi_qla_host_t *ha)
3063int 3063int
3064qla2x00_abort_isp(scsi_qla_host_t *ha) 3064qla2x00_abort_isp(scsi_qla_host_t *ha)
3065{ 3065{
3066 int rval;
3066 unsigned long flags = 0; 3067 unsigned long flags = 0;
3067 uint16_t cnt; 3068 uint16_t cnt;
3068 srb_t *sp; 3069 srb_t *sp;
@@ -3119,6 +3120,16 @@ qla2x00_abort_isp(scsi_qla_host_t *ha)
3119 3120
3120 ha->isp_abort_cnt = 0; 3121 ha->isp_abort_cnt = 0;
3121 clear_bit(ISP_ABORT_RETRY, &ha->dpc_flags); 3122 clear_bit(ISP_ABORT_RETRY, &ha->dpc_flags);
3123
3124 if (ha->eft) {
3125 rval = qla2x00_trace_control(ha, TC_ENABLE,
3126 ha->eft_dma, EFT_NUM_BUFFERS);
3127 if (rval) {
3128 qla_printk(KERN_WARNING, ha,
3129 "Unable to reinitialize EFT "
3130 "(%d).\n", rval);
3131 }
3132 }
3122 } else { /* failed the ISP abort */ 3133 } else { /* failed the ISP abort */
3123 ha->flags.online = 1; 3134 ha->flags.online = 1;
3124 if (test_bit(ISP_ABORT_RETRY, &ha->dpc_flags)) { 3135 if (test_bit(ISP_ABORT_RETRY, &ha->dpc_flags)) {
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 2b60a27eff0..c5b3c610a32 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -471,6 +471,7 @@ __qla2x00_marker(scsi_qla_host_t *ha, uint16_t loop_id, uint16_t lun,
471 mrk24->nport_handle = cpu_to_le16(loop_id); 471 mrk24->nport_handle = cpu_to_le16(loop_id);
472 mrk24->lun[1] = LSB(lun); 472 mrk24->lun[1] = LSB(lun);
473 mrk24->lun[2] = MSB(lun); 473 mrk24->lun[2] = MSB(lun);
474 host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
474 } else { 475 } else {
475 SET_TARGET_ID(ha, mrk->target, loop_id); 476 SET_TARGET_ID(ha, mrk->target, loop_id);
476 mrk->lun = cpu_to_le16(lun); 477 mrk->lun = cpu_to_le16(lun);
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 795bf15b1b8..de0613135f7 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -587,6 +587,11 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
587 DEBUG2(printk("scsi(%ld): Discard RND Frame -- %04x %04x " 587 DEBUG2(printk("scsi(%ld): Discard RND Frame -- %04x %04x "
588 "%04x.\n", ha->host_no, mb[1], mb[2], mb[3])); 588 "%04x.\n", ha->host_no, mb[1], mb[2], mb[3]));
589 break; 589 break;
590
591 case MBA_TRACE_NOTIFICATION:
592 DEBUG2(printk("scsi(%ld): Trace Notification -- %04x %04x.\n",
593 ha->host_no, mb[1], mb[2]));
594 break;
590 } 595 }
591} 596}
592 597
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index ec7ebb6037e..65cbe2f5eea 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -744,7 +744,6 @@ qla2xxx_eh_device_reset(struct scsi_cmnd *cmd)
744{ 744{
745 scsi_qla_host_t *ha = to_qla_host(cmd->device->host); 745 scsi_qla_host_t *ha = to_qla_host(cmd->device->host);
746 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata; 746 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
747 srb_t *sp;
748 int ret; 747 int ret;
749 unsigned int id, lun; 748 unsigned int id, lun;
750 unsigned long serial; 749 unsigned long serial;
@@ -755,8 +754,7 @@ qla2xxx_eh_device_reset(struct scsi_cmnd *cmd)
755 lun = cmd->device->lun; 754 lun = cmd->device->lun;
756 serial = cmd->serial_number; 755 serial = cmd->serial_number;
757 756
758 sp = (srb_t *) CMD_SP(cmd); 757 if (!fcport)
759 if (!sp || !fcport)
760 return ret; 758 return ret;
761 759
762 qla_printk(KERN_INFO, ha, 760 qla_printk(KERN_INFO, ha,
@@ -875,7 +873,6 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
875{ 873{
876 scsi_qla_host_t *ha = to_qla_host(cmd->device->host); 874 scsi_qla_host_t *ha = to_qla_host(cmd->device->host);
877 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata; 875 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
878 srb_t *sp;
879 int ret; 876 int ret;
880 unsigned int id, lun; 877 unsigned int id, lun;
881 unsigned long serial; 878 unsigned long serial;
@@ -886,8 +883,7 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
886 lun = cmd->device->lun; 883 lun = cmd->device->lun;
887 serial = cmd->serial_number; 884 serial = cmd->serial_number;
888 885
889 sp = (srb_t *) CMD_SP(cmd); 886 if (!fcport)
890 if (!sp || !fcport)
891 return ret; 887 return ret;
892 888
893 qla_printk(KERN_INFO, ha, 889 qla_printk(KERN_INFO, ha,
@@ -936,7 +932,6 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
936{ 932{
937 scsi_qla_host_t *ha = to_qla_host(cmd->device->host); 933 scsi_qla_host_t *ha = to_qla_host(cmd->device->host);
938 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata; 934 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
939 srb_t *sp;
940 int ret; 935 int ret;
941 unsigned int id, lun; 936 unsigned int id, lun;
942 unsigned long serial; 937 unsigned long serial;
@@ -947,8 +942,7 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
947 lun = cmd->device->lun; 942 lun = cmd->device->lun;
948 serial = cmd->serial_number; 943 serial = cmd->serial_number;
949 944
950 sp = (srb_t *) CMD_SP(cmd); 945 if (!fcport)
951 if (!sp || !fcport)
952 return ret; 946 return ret;
953 947
954 qla_printk(KERN_INFO, ha, 948 qla_printk(KERN_INFO, ha,
@@ -2244,9 +2238,6 @@ qla2x00_do_dpc(void *data)
2244 2238
2245 next_loopid = 0; 2239 next_loopid = 0;
2246 list_for_each_entry(fcport, &ha->fcports, list) { 2240 list_for_each_entry(fcport, &ha->fcports, list) {
2247 if (fcport->port_type != FCT_TARGET)
2248 continue;
2249
2250 /* 2241 /*
2251 * If the port is not ONLINE then try to login 2242 * If the port is not ONLINE then try to login
2252 * to it if we haven't run out of retries. 2243 * to it if we haven't run out of retries.
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index d2d68344065..971259032ef 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,9 +7,9 @@
7/* 7/*
8 * Driver version 8 * Driver version
9 */ 9 */
10#define QLA2XXX_VERSION "8.01.05-k3" 10#define QLA2XXX_VERSION "8.01.07-k1"
11 11
12#define QLA_DRIVER_MAJOR_VER 8 12#define QLA_DRIVER_MAJOR_VER 8
13#define QLA_DRIVER_MINOR_VER 1 13#define QLA_DRIVER_MINOR_VER 1
14#define QLA_DRIVER_PATCH_VER 5 14#define QLA_DRIVER_PATCH_VER 7
15#define QLA_DRIVER_BETA_VER 0 15#define QLA_DRIVER_BETA_VER 0
diff --git a/drivers/scsi/sata_sil24.c b/drivers/scsi/sata_sil24.c
index 2e0f4a4076a..3f368c7d3ef 100644
--- a/drivers/scsi/sata_sil24.c
+++ b/drivers/scsi/sata_sil24.c
@@ -1106,7 +1106,6 @@ static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1106 1106
1107 probe_ent->irq = pdev->irq; 1107 probe_ent->irq = pdev->irq;
1108 probe_ent->irq_flags = IRQF_SHARED; 1108 probe_ent->irq_flags = IRQF_SHARED;
1109 probe_ent->mmio_base = port_base;
1110 probe_ent->private_data = hpriv; 1109 probe_ent->private_data = hpriv;
1111 1110
1112 hpriv->host_base = host_base; 1111 hpriv->host_base = host_base;
diff --git a/drivers/scsi/sata_via.c b/drivers/scsi/sata_via.c
index 03baec2191b..01d40369a8a 100644
--- a/drivers/scsi/sata_via.c
+++ b/drivers/scsi/sata_via.c
@@ -74,6 +74,7 @@ enum {
74static int svia_init_one (struct pci_dev *pdev, const struct pci_device_id *ent); 74static int svia_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
75static u32 svia_scr_read (struct ata_port *ap, unsigned int sc_reg); 75static u32 svia_scr_read (struct ata_port *ap, unsigned int sc_reg);
76static void svia_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val); 76static void svia_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
77static void vt6420_error_handler(struct ata_port *ap);
77 78
78static const struct pci_device_id svia_pci_tbl[] = { 79static const struct pci_device_id svia_pci_tbl[] = {
79 { 0x1106, 0x3149, PCI_ANY_ID, PCI_ANY_ID, 0, 0, vt6420 }, 80 { 0x1106, 0x3149, PCI_ANY_ID, PCI_ANY_ID, 0, 0, vt6420 },
@@ -107,7 +108,38 @@ static struct scsi_host_template svia_sht = {
107 .bios_param = ata_std_bios_param, 108 .bios_param = ata_std_bios_param,
108}; 109};
109 110
110static const struct ata_port_operations svia_sata_ops = { 111static const struct ata_port_operations vt6420_sata_ops = {
112 .port_disable = ata_port_disable,
113
114 .tf_load = ata_tf_load,
115 .tf_read = ata_tf_read,
116 .check_status = ata_check_status,
117 .exec_command = ata_exec_command,
118 .dev_select = ata_std_dev_select,
119
120 .bmdma_setup = ata_bmdma_setup,
121 .bmdma_start = ata_bmdma_start,
122 .bmdma_stop = ata_bmdma_stop,
123 .bmdma_status = ata_bmdma_status,
124
125 .qc_prep = ata_qc_prep,
126 .qc_issue = ata_qc_issue_prot,
127 .data_xfer = ata_pio_data_xfer,
128
129 .freeze = ata_bmdma_freeze,
130 .thaw = ata_bmdma_thaw,
131 .error_handler = vt6420_error_handler,
132 .post_internal_cmd = ata_bmdma_post_internal_cmd,
133
134 .irq_handler = ata_interrupt,
135 .irq_clear = ata_bmdma_irq_clear,
136
137 .port_start = ata_port_start,
138 .port_stop = ata_port_stop,
139 .host_stop = ata_host_stop,
140};
141
142static const struct ata_port_operations vt6421_sata_ops = {
111 .port_disable = ata_port_disable, 143 .port_disable = ata_port_disable,
112 144
113 .tf_load = ata_tf_load, 145 .tf_load = ata_tf_load,
@@ -141,13 +173,13 @@ static const struct ata_port_operations svia_sata_ops = {
141 .host_stop = ata_host_stop, 173 .host_stop = ata_host_stop,
142}; 174};
143 175
144static struct ata_port_info svia_port_info = { 176static struct ata_port_info vt6420_port_info = {
145 .sht = &svia_sht, 177 .sht = &svia_sht,
146 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY, 178 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
147 .pio_mask = 0x1f, 179 .pio_mask = 0x1f,
148 .mwdma_mask = 0x07, 180 .mwdma_mask = 0x07,
149 .udma_mask = 0x7f, 181 .udma_mask = 0x7f,
150 .port_ops = &svia_sata_ops, 182 .port_ops = &vt6420_sata_ops,
151}; 183};
152 184
153MODULE_AUTHOR("Jeff Garzik"); 185MODULE_AUTHOR("Jeff Garzik");
@@ -170,6 +202,81 @@ static void svia_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
170 outl(val, ap->ioaddr.scr_addr + (4 * sc_reg)); 202 outl(val, ap->ioaddr.scr_addr + (4 * sc_reg));
171} 203}
172 204
205/**
206 * vt6420_prereset - prereset for vt6420
207 * @ap: target ATA port
208 *
209 * SCR registers on vt6420 are pieces of shit and may hang the
210 * whole machine completely if accessed with the wrong timing.
211 * To avoid such catastrophe, vt6420 doesn't provide generic SCR
212 * access operations, but uses SStatus and SControl only during
213 * boot probing in controlled way.
214 *
215 * As the old (pre EH update) probing code is proven to work, we
216 * strictly follow the access pattern.
217 *
218 * LOCKING:
219 * Kernel thread context (may sleep)
220 *
221 * RETURNS:
222 * 0 on success, -errno otherwise.
223 */
224static int vt6420_prereset(struct ata_port *ap)
225{
226 struct ata_eh_context *ehc = &ap->eh_context;
227 unsigned long timeout = jiffies + (HZ * 5);
228 u32 sstatus, scontrol;
229 int online;
230
231 /* don't do any SCR stuff if we're not loading */
232 if (!ATA_PFLAG_LOADING)
233 goto skip_scr;
234
235 /* Resume phy. This is the old resume sequence from
236 * __sata_phy_reset().
237 */
238 svia_scr_write(ap, SCR_CONTROL, 0x300);
239 svia_scr_read(ap, SCR_CONTROL); /* flush */
240
241 /* wait for phy to become ready, if necessary */
242 do {
243 msleep(200);
244 if ((svia_scr_read(ap, SCR_STATUS) & 0xf) != 1)
245 break;
246 } while (time_before(jiffies, timeout));
247
248 /* open code sata_print_link_status() */
249 sstatus = svia_scr_read(ap, SCR_STATUS);
250 scontrol = svia_scr_read(ap, SCR_CONTROL);
251
252 online = (sstatus & 0xf) == 0x3;
253
254 ata_port_printk(ap, KERN_INFO,
255 "SATA link %s 1.5 Gbps (SStatus %X SControl %X)\n",
256 online ? "up" : "down", sstatus, scontrol);
257
258 /* SStatus is read one more time */
259 svia_scr_read(ap, SCR_STATUS);
260
261 if (!online) {
262 /* tell EH to bail */
263 ehc->i.action &= ~ATA_EH_RESET_MASK;
264 return 0;
265 }
266
267 skip_scr:
268 /* wait for !BSY */
269 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
270
271 return 0;
272}
273
274static void vt6420_error_handler(struct ata_port *ap)
275{
276 return ata_bmdma_drive_eh(ap, vt6420_prereset, ata_std_softreset,
277 NULL, ata_std_postreset);
278}
279
173static const unsigned int svia_bar_sizes[] = { 280static const unsigned int svia_bar_sizes[] = {
174 8, 4, 8, 4, 16, 256 281 8, 4, 8, 4, 16, 256
175}; 282};
@@ -210,7 +317,7 @@ static void vt6421_init_addrs(struct ata_probe_ent *probe_ent,
210static struct ata_probe_ent *vt6420_init_probe_ent(struct pci_dev *pdev) 317static struct ata_probe_ent *vt6420_init_probe_ent(struct pci_dev *pdev)
211{ 318{
212 struct ata_probe_ent *probe_ent; 319 struct ata_probe_ent *probe_ent;
213 struct ata_port_info *ppi = &svia_port_info; 320 struct ata_port_info *ppi = &vt6420_port_info;
214 321
215 probe_ent = ata_pci_init_native_mode(pdev, &ppi, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY); 322 probe_ent = ata_pci_init_native_mode(pdev, &ppi, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
216 if (!probe_ent) 323 if (!probe_ent)
@@ -239,7 +346,7 @@ static struct ata_probe_ent *vt6421_init_probe_ent(struct pci_dev *pdev)
239 346
240 probe_ent->sht = &svia_sht; 347 probe_ent->sht = &svia_sht;
241 probe_ent->host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY; 348 probe_ent->host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY;
242 probe_ent->port_ops = &svia_sata_ops; 349 probe_ent->port_ops = &vt6421_sata_ops;
243 probe_ent->n_ports = N_PORTS; 350 probe_ent->n_ports = N_PORTS;
244 probe_ent->irq = pdev->irq; 351 probe_ent->irq = pdev->irq;
245 probe_ent->irq_flags = IRQF_SHARED; 352 probe_ent->irq_flags = IRQF_SHARED;
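
The sata_via.c changes split the shared svia_sata_ops into vt6420 and vt6421 variants because SCR access on the vt6420 can hang the machine; instead of exposing generic SCR operations, vt6420_prereset() resumes the phy and polls SStatus only during boot probing. The standalone sketch below models just that polling decision: read_sstatus() is a userspace stand-in for svia_scr_read(ap, SCR_STATUS), while the 200 ms poll interval and roughly 5 s budget follow the hunk.

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static unsigned int read_sstatus(int polls)
{
	/* pretend DET reports 0x3 (device present, phy online) after
	 * two 200 ms polls */
	return polls >= 2 ? 0x123 : 0x1;
}

static bool vt6420_link_online(void)
{
	unsigned int sstatus = read_sstatus(0);
	int polls;

	for (polls = 0; polls < 5000 / 200; polls++) {	/* ~5 s budget */
		usleep(200 * 1000);			/* msleep(200) */
		sstatus = read_sstatus(polls);
		if ((sstatus & 0xf) != 1)		/* phy left "trying" */
			break;
	}
	return (sstatus & 0xf) == 0x3;			/* link is up */
}

int main(void)
{
	printf("SATA link %s\n", vt6420_link_online() ? "up" : "down");
	return 0;
}
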
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 6a5b731bd5b..a8ed5a22009 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -460,7 +460,8 @@ static void scsi_eh_done(struct scsi_cmnd *scmd)
460 * Return value: 460 * Return value:
461 * SUCCESS or FAILED or NEEDS_RETRY 461 * SUCCESS or FAILED or NEEDS_RETRY
462 **/ 462 **/
463static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, int timeout, int copy_sense) 463static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, unsigned char *cmnd,
464 int cmnd_size, int timeout, int copy_sense)
464{ 465{
465 struct scsi_device *sdev = scmd->device; 466 struct scsi_device *sdev = scmd->device;
466 struct Scsi_Host *shost = sdev->host; 467 struct Scsi_Host *shost = sdev->host;
@@ -490,6 +491,9 @@ static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, int timeout, int copy_sense
490 old_cmd_len = scmd->cmd_len; 491 old_cmd_len = scmd->cmd_len;
491 old_use_sg = scmd->use_sg; 492 old_use_sg = scmd->use_sg;
492 493
494 memset(scmd->cmnd, 0, sizeof(scmd->cmnd));
495 memcpy(scmd->cmnd, cmnd, cmnd_size);
496
493 if (copy_sense) { 497 if (copy_sense) {
494 int gfp_mask = GFP_ATOMIC; 498 int gfp_mask = GFP_ATOMIC;
495 499
@@ -610,8 +614,7 @@ static int scsi_request_sense(struct scsi_cmnd *scmd)
610 static unsigned char generic_sense[6] = 614 static unsigned char generic_sense[6] =
611 {REQUEST_SENSE, 0, 0, 0, 252, 0}; 615 {REQUEST_SENSE, 0, 0, 0, 252, 0};
612 616
613 memcpy(scmd->cmnd, generic_sense, sizeof(generic_sense)); 617 return scsi_send_eh_cmnd(scmd, generic_sense, 6, SENSE_TIMEOUT, 1);
614 return scsi_send_eh_cmnd(scmd, SENSE_TIMEOUT, 1);
615} 618}
616 619
617/** 620/**
@@ -736,10 +739,7 @@ static int scsi_eh_tur(struct scsi_cmnd *scmd)
736 int retry_cnt = 1, rtn; 739 int retry_cnt = 1, rtn;
737 740
738retry_tur: 741retry_tur:
739 memcpy(scmd->cmnd, tur_command, sizeof(tur_command)); 742 rtn = scsi_send_eh_cmnd(scmd, tur_command, 6, SENSE_TIMEOUT, 0);
740
741
742 rtn = scsi_send_eh_cmnd(scmd, SENSE_TIMEOUT, 0);
743 743
744 SCSI_LOG_ERROR_RECOVERY(3, printk("%s: scmd %p rtn %x\n", 744 SCSI_LOG_ERROR_RECOVERY(3, printk("%s: scmd %p rtn %x\n",
745 __FUNCTION__, scmd, rtn)); 745 __FUNCTION__, scmd, rtn));
@@ -839,8 +839,8 @@ static int scsi_eh_try_stu(struct scsi_cmnd *scmd)
839 if (scmd->device->allow_restart) { 839 if (scmd->device->allow_restart) {
840 int rtn; 840 int rtn;
841 841
842 memcpy(scmd->cmnd, stu_command, sizeof(stu_command)); 842 rtn = scsi_send_eh_cmnd(scmd, stu_command, 6,
843 rtn = scsi_send_eh_cmnd(scmd, START_UNIT_TIMEOUT, 0); 843 START_UNIT_TIMEOUT, 0);
844 if (rtn == SUCCESS) 844 if (rtn == SUCCESS)
845 return 0; 845 return 0;
846 } 846 }
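
With the scsi_error.c change above, scsi_send_eh_cmnd() receives the CDB bytes and their length, zeroes scmd->cmnd, and copies the CDB in itself, so callers such as scsi_request_sense(), scsi_eh_tur(), and scsi_eh_try_stu() no longer memcpy() into the command buffer first. A minimal sketch of that calling convention follows; the struct and helper are illustrative, not the SCSI midlayer's types.

#include <stdio.h>
#include <string.h>

#define MAX_COMMAND_SIZE 16
#define REQUEST_SENSE 0x03

struct fake_scmd {
	unsigned char cmnd[MAX_COMMAND_SIZE];
};

static int send_eh_cmnd(struct fake_scmd *scmd, const unsigned char *cmnd,
			int cmnd_size, int timeout_s)
{
	memset(scmd->cmnd, 0, sizeof(scmd->cmnd));	/* clear stale bytes */
	memcpy(scmd->cmnd, cmnd, cmnd_size);		/* install the CDB */
	/* ... issue the command and wait up to timeout_s seconds ... */
	(void)timeout_s;
	return 0;
}

int main(void)
{
	static const unsigned char generic_sense[6] =
		{ REQUEST_SENSE, 0, 0, 0, 252, 0 };
	struct fake_scmd scmd;

	send_eh_cmnd(&scmd, generic_sense, sizeof(generic_sense), 10);
	printf("opcode 0x%02x, allocation length %d\n",
	       scmd.cmnd[0], scmd.cmnd[4]);
	return 0;
}
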
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 7b9e8fa1a4e..2ecd1418857 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -34,6 +34,7 @@
34#define ISCSI_SESSION_ATTRS 11 34#define ISCSI_SESSION_ATTRS 11
35#define ISCSI_CONN_ATTRS 11 35#define ISCSI_CONN_ATTRS 11
36#define ISCSI_HOST_ATTRS 0 36#define ISCSI_HOST_ATTRS 0
37#define ISCSI_TRANSPORT_VERSION "1.1-646"
37 38
38struct iscsi_internal { 39struct iscsi_internal {
39 int daemon_pid; 40 int daemon_pid;
@@ -634,13 +635,13 @@ mempool_zone_get_skb(struct mempool_zone *zone)
634} 635}
635 636
636static int 637static int
637iscsi_broadcast_skb(struct mempool_zone *zone, struct sk_buff *skb) 638iscsi_broadcast_skb(struct mempool_zone *zone, struct sk_buff *skb, gfp_t gfp)
638{ 639{
639 unsigned long flags; 640 unsigned long flags;
640 int rc; 641 int rc;
641 642
642 skb_get(skb); 643 skb_get(skb);
643 rc = netlink_broadcast(nls, skb, 0, 1, GFP_KERNEL); 644 rc = netlink_broadcast(nls, skb, 0, 1, gfp);
644 if (rc < 0) { 645 if (rc < 0) {
645 mempool_free(skb, zone->pool); 646 mempool_free(skb, zone->pool);
646 printk(KERN_ERR "iscsi: can not broadcast skb (%d)\n", rc); 647 printk(KERN_ERR "iscsi: can not broadcast skb (%d)\n", rc);
@@ -749,7 +750,7 @@ void iscsi_conn_error(struct iscsi_cls_conn *conn, enum iscsi_err error)
749 ev->r.connerror.cid = conn->cid; 750 ev->r.connerror.cid = conn->cid;
750 ev->r.connerror.sid = iscsi_conn_get_sid(conn); 751 ev->r.connerror.sid = iscsi_conn_get_sid(conn);
751 752
752 iscsi_broadcast_skb(conn->z_error, skb); 753 iscsi_broadcast_skb(conn->z_error, skb, GFP_ATOMIC);
753 754
754 dev_printk(KERN_INFO, &conn->dev, "iscsi: detected conn error (%d)\n", 755 dev_printk(KERN_INFO, &conn->dev, "iscsi: detected conn error (%d)\n",
755 error); 756 error);
@@ -895,7 +896,7 @@ int iscsi_if_destroy_session_done(struct iscsi_cls_conn *conn)
895 * this will occur if the daemon is not up, so we just warn 896 * this will occur if the daemon is not up, so we just warn
896 * the user and when the daemon is restarted it will handle it 897 * the user and when the daemon is restarted it will handle it
897 */ 898 */
898 rc = iscsi_broadcast_skb(conn->z_pdu, skb); 899 rc = iscsi_broadcast_skb(conn->z_pdu, skb, GFP_KERNEL);
899 if (rc < 0) 900 if (rc < 0)
900 dev_printk(KERN_ERR, &conn->dev, "Cannot notify userspace of " 901 dev_printk(KERN_ERR, &conn->dev, "Cannot notify userspace of "
901 "session destruction event. Check iscsi daemon\n"); 902 "session destruction event. Check iscsi daemon\n");
@@ -958,7 +959,7 @@ int iscsi_if_create_session_done(struct iscsi_cls_conn *conn)
958 * this will occur if the daemon is not up, so we just warn 959 * this will occur if the daemon is not up, so we just warn
959 * the user and when the daemon is restarted it will handle it 960 * the user and when the daemon is restarted it will handle it
960 */ 961 */
961 rc = iscsi_broadcast_skb(conn->z_pdu, skb); 962 rc = iscsi_broadcast_skb(conn->z_pdu, skb, GFP_KERNEL);
962 if (rc < 0) 963 if (rc < 0)
963 dev_printk(KERN_ERR, &conn->dev, "Cannot notify userspace of " 964 dev_printk(KERN_ERR, &conn->dev, "Cannot notify userspace of "
964 "session creation event. Check iscsi daemon\n"); 965 "session creation event. Check iscsi daemon\n");
@@ -1613,6 +1614,9 @@ static __init int iscsi_transport_init(void)
1613{ 1614{
1614 int err; 1615 int err;
1615 1616
1617 printk(KERN_INFO "Loading iSCSI transport class v%s.",
1618 ISCSI_TRANSPORT_VERSION);
1619
1616 err = class_register(&iscsi_transport_class); 1620 err = class_register(&iscsi_transport_class);
1617 if (err) 1621 if (err)
1618 return err; 1622 return err;
@@ -1678,3 +1682,4 @@ MODULE_AUTHOR("Mike Christie <michaelc@cs.wisc.edu>, "
1678 "Alex Aizman <itn780@yahoo.com>"); 1682 "Alex Aizman <itn780@yahoo.com>");
1679MODULE_DESCRIPTION("iSCSI Transport Interface"); 1683MODULE_DESCRIPTION("iSCSI Transport Interface");
1680MODULE_LICENSE("GPL"); 1684MODULE_LICENSE("GPL");
1685MODULE_VERSION(ISCSI_TRANSPORT_VERSION);
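
The iscsi_broadcast_skb() change threads an allocation-flags argument through to netlink_broadcast(), so the connection-error path, which can run in atomic context, passes GFP_ATOMIC while the session create/destroy notifications keep GFP_KERNEL. A toy sketch of the idea (the enum and functions are stand-ins, not the netlink API):

#include <stdio.h>

enum gfp { GFP_KERNEL, GFP_ATOMIC };

static int broadcast_event(const char *event, enum gfp flags)
{
	/* a real implementation would allocate with the given flags */
	printf("broadcast %s using %s\n", event,
	       flags == GFP_ATOMIC ? "GFP_ATOMIC" : "GFP_KERNEL");
	return 0;
}

static void conn_error(void)
{
	/* may be called from interrupt context: must not sleep */
	broadcast_event("connerror", GFP_ATOMIC);
}

static void session_created(void)
{
	/* process context: a sleeping allocation is fine */
	broadcast_event("session created", GFP_KERNEL);
}

int main(void)
{
	conn_error();
	session_created();
	return 0;
}
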
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 65eef33846b..34f9343ed0a 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -18,8 +18,8 @@
18 * 18 *
19 */ 19 */
20 20
21static int sg_version_num = 30533; /* 2 digits for each component */ 21static int sg_version_num = 30534; /* 2 digits for each component */
22#define SG_VERSION_STR "3.5.33" 22#define SG_VERSION_STR "3.5.34"
23 23
24/* 24/*
25 * D. P. Gilbert (dgilbert@interlog.com, dougg@triode.net.au), notes: 25 * D. P. Gilbert (dgilbert@interlog.com, dougg@triode.net.au), notes:
@@ -60,7 +60,7 @@ static int sg_version_num = 30533; /* 2 digits for each component */
60 60
61#ifdef CONFIG_SCSI_PROC_FS 61#ifdef CONFIG_SCSI_PROC_FS
62#include <linux/proc_fs.h> 62#include <linux/proc_fs.h>
63static char *sg_version_date = "20050908"; 63static char *sg_version_date = "20060818";
64 64
65static int sg_proc_init(void); 65static int sg_proc_init(void);
66static void sg_proc_cleanup(void); 66static void sg_proc_cleanup(void);
@@ -1164,7 +1164,7 @@ sg_vma_nopage(struct vm_area_struct *vma, unsigned long addr, int *type)
1164 len = vma->vm_end - sa; 1164 len = vma->vm_end - sa;
1165 len = (len < sg->length) ? len : sg->length; 1165 len = (len < sg->length) ? len : sg->length;
1166 if (offset < len) { 1166 if (offset < len) {
1167 page = sg->page; 1167 page = virt_to_page(page_address(sg->page) + offset);
1168 get_page(page); /* increment page count */ 1168 get_page(page); /* increment page count */
1169 break; 1169 break;
1170 } 1170 }
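
The sg.c fix above matters for buffers that span several pages: the nopage handler must hand back the page that covers the faulting offset, which virt_to_page(page_address(sg->page) + offset) computes, rather than always the first page of the scatter element. The arithmetic amounts to an offset-to-page-index calculation, sketched below with an assumed 4 KiB page size.

#include <stdio.h>

#define PAGE_SIZE 4096UL

/* index of the page covering a byte offset within a multi-page buffer */
static unsigned long page_index_for_offset(unsigned long offset)
{
	return offset / PAGE_SIZE;
}

int main(void)
{
	unsigned long offsets[] = { 0, 4095, 4096, 12345 };
	unsigned long i;

	for (i = 0; i < sizeof(offsets) / sizeof(offsets[0]); i++)
		printf("offset %5lu -> page %lu\n",
		       offsets[i], page_index_for_offset(offsets[i]));
	return 0;
}
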
diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c
index 8c505076c0e..739d3ef46a4 100644
--- a/drivers/scsi/sym53c8xx_2/sym_glue.c
+++ b/drivers/scsi/sym53c8xx_2/sym_glue.c
@@ -2084,7 +2084,7 @@ static struct pci_device_id sym2_id_table[] __devinitdata = {
2084 { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C860, 2084 { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C860,
2085 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 2085 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
2086 { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_53C1510, 2086 { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_53C1510,
2087 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 2087 PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_STORAGE_SCSI<<8, 0xffff00, 0UL },
2088 { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C896, 2088 { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C896,
2089 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 2089 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
2090 { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C895, 2090 { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C895,
diff --git a/drivers/serial/8250_pci.c b/drivers/serial/8250_pci.c
index a1d322f8a16..cd1979daf2b 100644
--- a/drivers/serial/8250_pci.c
+++ b/drivers/serial/8250_pci.c
@@ -936,6 +936,7 @@ enum pci_board_num_t {
936 pbn_b1_8_1382400, 936 pbn_b1_8_1382400,
937 937
938 pbn_b2_1_115200, 938 pbn_b2_1_115200,
939 pbn_b2_2_115200,
939 pbn_b2_8_115200, 940 pbn_b2_8_115200,
940 941
941 pbn_b2_1_460800, 942 pbn_b2_1_460800,
@@ -1243,6 +1244,12 @@ static struct pciserial_board pci_boards[] __devinitdata = {
1243 .base_baud = 115200, 1244 .base_baud = 115200,
1244 .uart_offset = 8, 1245 .uart_offset = 8,
1245 }, 1246 },
1247 [pbn_b2_2_115200] = {
1248 .flags = FL_BASE2,
1249 .num_ports = 2,
1250 .base_baud = 115200,
1251 .uart_offset = 8,
1252 },
1246 [pbn_b2_8_115200] = { 1253 [pbn_b2_8_115200] = {
1247 .flags = FL_BASE2, 1254 .flags = FL_BASE2,
1248 .num_ports = 8, 1255 .num_ports = 8,
@@ -2340,6 +2347,13 @@ static struct pci_device_id serial_pci_tbl[] = {
2340 pbn_b0_1_115200 }, 2347 pbn_b0_1_115200 },
2341 2348
2342 /* 2349 /*
2350 * IntaShield IS-200
2351 */
2352 { PCI_VENDOR_ID_INTASHIELD, PCI_DEVICE_ID_INTASHIELD_IS200,
2353 PCI_ANY_ID, PCI_ANY_ID, 0, 0, /* 135a.0811 */
2354 pbn_b2_2_115200 },
2355
2356 /*
2343 * These entries match devices with class COMMUNICATION_SERIAL, 2357 * These entries match devices with class COMMUNICATION_SERIAL,
2344 * COMMUNICATION_MODEM or COMMUNICATION_MULTISERIAL 2358 * COMMUNICATION_MODEM or COMMUNICATION_MULTISERIAL
2345 */ 2359 */
diff --git a/drivers/serial/sunsab.c b/drivers/serial/sunsab.c
index dc673e1b6fd..cfe20f73043 100644
--- a/drivers/serial/sunsab.c
+++ b/drivers/serial/sunsab.c
@@ -886,6 +886,15 @@ static int sunsab_console_setup(struct console *con, char *options)
886 unsigned long flags; 886 unsigned long flags;
887 unsigned int baud, quot; 887 unsigned int baud, quot;
888 888
889 /*
890 * The console framework calls us for each and every port
891 * registered. Defer the console setup until the requested
892 * port has been properly discovered. A bit of a hack,
893 * though...
894 */
895 if (up->port.type != PORT_SUNSAB)
896 return -1;
897
889 printk("Console: ttyS%d (SAB82532)\n", 898 printk("Console: ttyS%d (SAB82532)\n",
890 (sunsab_reg.minor - 64) + con->index); 899 (sunsab_reg.minor - 64) + con->index);
891 900
diff --git a/drivers/serial/sunzilog.c b/drivers/serial/sunzilog.c
index 47bc3d57e01..d34f336d53d 100644
--- a/drivers/serial/sunzilog.c
+++ b/drivers/serial/sunzilog.c
@@ -1146,6 +1146,9 @@ static int __init sunzilog_console_setup(struct console *con, char *options)
1146 unsigned long flags; 1146 unsigned long flags;
1147 int baud, brg; 1147 int baud, brg;
1148 1148
1149 if (up->port.type != PORT_SUNZILOG)
1150 return -1;
1151
1149 printk(KERN_INFO "Console: ttyS%d (SunZilog zs%d)\n", 1152 printk(KERN_INFO "Console: ttyS%d (SunZilog zs%d)\n",
1150 (sunzilog_reg.minor - 64) + con->index, con->index); 1153 (sunzilog_reg.minor - 64) + con->index, con->index);
1151 1154
diff --git a/drivers/usb/host/ohci-au1xxx.c b/drivers/usb/host/ohci-au1xxx.c
index 822914e2f43..f7a975d5db0 100644
--- a/drivers/usb/host/ohci-au1xxx.c
+++ b/drivers/usb/host/ohci-au1xxx.c
@@ -110,7 +110,6 @@ static void au1xxx_start_ohc(struct platform_device *dev)
110 110
111 printk(KERN_DEBUG __FILE__ 111 printk(KERN_DEBUG __FILE__
112 ": Clock to USB host has been enabled \n"); 112 ": Clock to USB host has been enabled \n");
113#endif
114} 113}
115 114
116static void au1xxx_stop_ohc(struct platform_device *dev) 115static void au1xxx_stop_ohc(struct platform_device *dev)
diff --git a/drivers/usb/input/appletouch.c b/drivers/usb/input/appletouch.c
index 9e3f1390337..044faa07e29 100644
--- a/drivers/usb/input/appletouch.c
+++ b/drivers/usb/input/appletouch.c
@@ -597,9 +597,9 @@ static void atp_disconnect(struct usb_interface *iface)
597 if (dev) { 597 if (dev) {
598 usb_kill_urb(dev->urb); 598 usb_kill_urb(dev->urb);
599 input_unregister_device(dev->input); 599 input_unregister_device(dev->input);
600 usb_free_urb(dev->urb);
601 usb_buffer_free(dev->udev, dev->datalen, 600 usb_buffer_free(dev->udev, dev->datalen,
602 dev->data, dev->urb->transfer_dma); 601 dev->data, dev->urb->transfer_dma);
602 usb_free_urb(dev->urb);
603 kfree(dev); 603 kfree(dev);
604 } 604 }
605 printk(KERN_INFO "input: appletouch disconnected\n"); 605 printk(KERN_INFO "input: appletouch disconnected\n");
diff --git a/drivers/usb/misc/cypress_cy7c63.c b/drivers/usb/misc/cypress_cy7c63.c
index a4062a6adbb..9c46746d5d0 100644
--- a/drivers/usb/misc/cypress_cy7c63.c
+++ b/drivers/usb/misc/cypress_cy7c63.c
@@ -208,7 +208,7 @@ static int cypress_probe(struct usb_interface *interface,
208 /* allocate memory for our device state and initialize it */ 208 /* allocate memory for our device state and initialize it */
209 dev = kzalloc(sizeof(*dev), GFP_KERNEL); 209 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
210 if (dev == NULL) { 210 if (dev == NULL) {
211 dev_err(&dev->udev->dev, "Out of memory!\n"); 211 dev_err(&interface->dev, "Out of memory!\n");
212 goto error; 212 goto error;
213 } 213 }
214 214
diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c
index 786e1dbe88e..983e104dd45 100644
--- a/drivers/usb/misc/usbtest.c
+++ b/drivers/usb/misc/usbtest.c
@@ -1242,11 +1242,12 @@ done:
1242static int ctrl_out (struct usbtest_dev *dev, 1242static int ctrl_out (struct usbtest_dev *dev,
1243 unsigned count, unsigned length, unsigned vary) 1243 unsigned count, unsigned length, unsigned vary)
1244{ 1244{
1245 unsigned i, j, len, retval; 1245 unsigned i, j, len;
1246 int retval;
1246 u8 *buf; 1247 u8 *buf;
1247 char *what = "?"; 1248 char *what = "?";
1248 struct usb_device *udev; 1249 struct usb_device *udev;
1249 1250
1250 if (length < 1 || length > 0xffff || vary >= length) 1251 if (length < 1 || length > 0xffff || vary >= length)
1251 return -EINVAL; 1252 return -EINVAL;
1252 1253
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index a20da8528a5..15945e806f0 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -306,6 +306,8 @@ static struct ftdi_sio_quirk ftdi_HE_TIRA1_quirk = {
306 306
307 307
308static struct usb_device_id id_table_combined [] = { 308static struct usb_device_id id_table_combined [] = {
309 { USB_DEVICE(FTDI_VID, FTDI_AMC232_PID) },
310 { USB_DEVICE(FTDI_VID, FTDI_CANUSB_PID) },
309 { USB_DEVICE(FTDI_VID, FTDI_ACTZWAVE_PID) }, 311 { USB_DEVICE(FTDI_VID, FTDI_ACTZWAVE_PID) },
310 { USB_DEVICE(FTDI_VID, FTDI_IRTRANS_PID) }, 312 { USB_DEVICE(FTDI_VID, FTDI_IRTRANS_PID) },
311 { USB_DEVICE(FTDI_VID, FTDI_IPLUS_PID) }, 313 { USB_DEVICE(FTDI_VID, FTDI_IPLUS_PID) },
diff --git a/drivers/usb/serial/ftdi_sio.h b/drivers/usb/serial/ftdi_sio.h
index 9f7343a4542..8888cd80a49 100644
--- a/drivers/usb/serial/ftdi_sio.h
+++ b/drivers/usb/serial/ftdi_sio.h
@@ -32,6 +32,12 @@
32#define FTDI_NF_RIC_PID 0x0001 /* Product Id */ 32#define FTDI_NF_RIC_PID 0x0001 /* Product Id */
33 33
34 34
35/* www.canusb.com Lawicel CANUSB device */
36#define FTDI_CANUSB_PID 0xFFA8 /* Product Id */
37
38/* AlphaMicro Components AMC-232USB01 device */
39#define FTDI_AMC232_PID 0xFF00 /* Product Id */
40
35/* ACT Solutions HomePro ZWave interface (http://www.act-solutions.com/HomePro.htm) */ 41/* ACT Solutions HomePro ZWave interface (http://www.act-solutions.com/HomePro.htm) */
36#define FTDI_ACTZWAVE_PID 0xF2D0 42#define FTDI_ACTZWAVE_PID 0xF2D0
37 43
diff --git a/drivers/usb/serial/ipaq.c b/drivers/usb/serial/ipaq.c
index 7e1bd5d6dfa..9840bade79f 100644
--- a/drivers/usb/serial/ipaq.c
+++ b/drivers/usb/serial/ipaq.c
@@ -251,6 +251,8 @@ static struct usb_device_id ipaq_id_table [] = {
251 { USB_DEVICE(0x04C5, 0x1079) }, /* FUJITSU USB Sync */ 251 { USB_DEVICE(0x04C5, 0x1079) }, /* FUJITSU USB Sync */
252 { USB_DEVICE(0x04DA, 0x2500) }, /* Panasonic USB Sync */ 252 { USB_DEVICE(0x04DA, 0x2500) }, /* Panasonic USB Sync */
253 { USB_DEVICE(0x04DD, 0x9102) }, /* SHARP WS003SH USB Modem */ 253 { USB_DEVICE(0x04DD, 0x9102) }, /* SHARP WS003SH USB Modem */
254 { USB_DEVICE(0x04DD, 0x9121) }, /* SHARP WS004SH USB Modem */
255 { USB_DEVICE(0x04DD, 0x9123) }, /* SHARP WS007SH USB Modem */
254 { USB_DEVICE(0x04E8, 0x5F00) }, /* Samsung NEXiO USB Sync */ 256 { USB_DEVICE(0x04E8, 0x5F00) }, /* Samsung NEXiO USB Sync */
255 { USB_DEVICE(0x04E8, 0x5F01) }, /* Samsung NEXiO USB Sync */ 257 { USB_DEVICE(0x04E8, 0x5F01) }, /* Samsung NEXiO USB Sync */
256 { USB_DEVICE(0x04E8, 0x5F02) }, /* Samsung NEXiO USB Sync */ 258 { USB_DEVICE(0x04E8, 0x5F02) }, /* Samsung NEXiO USB Sync */
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index efbbc0adb89..65e4d046951 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -79,7 +79,6 @@ static struct usb_device_id id_table [] = {
79 { USB_DEVICE(SAGEM_VENDOR_ID, SAGEM_PRODUCT_ID) }, 79 { USB_DEVICE(SAGEM_VENDOR_ID, SAGEM_PRODUCT_ID) },
80 { USB_DEVICE(LEADTEK_VENDOR_ID, LEADTEK_9531_PRODUCT_ID) }, 80 { USB_DEVICE(LEADTEK_VENDOR_ID, LEADTEK_9531_PRODUCT_ID) },
81 { USB_DEVICE(SPEEDDRAGON_VENDOR_ID, SPEEDDRAGON_PRODUCT_ID) }, 81 { USB_DEVICE(SPEEDDRAGON_VENDOR_ID, SPEEDDRAGON_PRODUCT_ID) },
82 { USB_DEVICE(OTI_VENDOR_ID, OTI_PRODUCT_ID) },
83 { USB_DEVICE(DATAPILOT_U2_VENDOR_ID, DATAPILOT_U2_PRODUCT_ID) }, 82 { USB_DEVICE(DATAPILOT_U2_VENDOR_ID, DATAPILOT_U2_PRODUCT_ID) },
84 { USB_DEVICE(BELKIN_VENDOR_ID, BELKIN_PRODUCT_ID) }, 83 { USB_DEVICE(BELKIN_VENDOR_ID, BELKIN_PRODUCT_ID) },
85 { } /* Terminating entry */ 84 { } /* Terminating entry */
diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
index a692ac66ca6..55195e76eb6 100644
--- a/drivers/usb/serial/pl2303.h
+++ b/drivers/usb/serial/pl2303.h
@@ -82,10 +82,6 @@
82#define SPEEDDRAGON_VENDOR_ID 0x0e55 82#define SPEEDDRAGON_VENDOR_ID 0x0e55
83#define SPEEDDRAGON_PRODUCT_ID 0x110b 83#define SPEEDDRAGON_PRODUCT_ID 0x110b
84 84
85/* Ours Technology Inc DKU-5 clone, chipset: Prolific Technology Inc */
86#define OTI_VENDOR_ID 0x0ea0
87#define OTI_PRODUCT_ID 0x6858
88
89/* DATAPILOT Universal-2 Phone Cable */ 85/* DATAPILOT Universal-2 Phone Cable */
90#define DATAPILOT_U2_VENDOR_ID 0x0731 86#define DATAPILOT_U2_VENDOR_ID 0x0731
91#define DATAPILOT_U2_PRODUCT_ID 0x2003 87#define DATAPILOT_U2_PRODUCT_ID 0x2003
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index 2793f9a912b..4a803d69fa3 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -1240,6 +1240,16 @@ UNUSUAL_DEV( 0x0ed1, 0x7636, 0x0103, 0x0103,
1240 US_SC_DEVICE, US_PR_DEVICE, NULL, 1240 US_SC_DEVICE, US_PR_DEVICE, NULL,
1241 US_FL_IGNORE_RESIDUE | US_FL_GO_SLOW | US_FL_MAX_SECTORS_64), 1241 US_FL_IGNORE_RESIDUE | US_FL_GO_SLOW | US_FL_MAX_SECTORS_64),
1242 1242
1243/* David Kuehling <dvdkhlng@gmx.de>:
1244 * for MP3-Player AVOX WSX-300ER (bought in Japan). Reports lots of SCSI
1245 * errors when trying to write.
1246 */
1247UNUSUAL_DEV( 0x0f19, 0x0105, 0x0100, 0x0100,
1248 "C-MEX",
1249 "A-VOX",
1250 US_SC_DEVICE, US_PR_DEVICE, NULL,
1251 US_FL_IGNORE_RESIDUE ),
1252
1243/* Reported by Michael Stattmann <michael@stattmann.com> */ 1253/* Reported by Michael Stattmann <michael@stattmann.com> */
1244UNUSUAL_DEV( 0x0fce, 0xd008, 0x0000, 0x0000, 1254UNUSUAL_DEV( 0x0fce, 0xd008, 0x0000, 0x0000,
1245 "Sony Ericsson", 1255 "Sony Ericsson",
@@ -1251,7 +1261,7 @@ UNUSUAL_DEV( 0x0fce, 0xd008, 0x0000, 0x0000,
1251 * Tested on hardware version 1.10. 1261 * Tested on hardware version 1.10.
1252 * Entry is needed only for the initializer function override. 1262 * Entry is needed only for the initializer function override.
1253 */ 1263 */
1254UNUSUAL_DEV( 0x1019, 0x0c55, 0x0000, 0x9999, 1264UNUSUAL_DEV( 0x1019, 0x0c55, 0x0110, 0x0110,
1255 "Desknote", 1265 "Desknote",
1256 "UCR-61S2B", 1266 "UCR-61S2B",
1257 US_SC_DEVICE, US_PR_DEVICE, usb_stor_ucr61s2b_init, 1267 US_SC_DEVICE, US_PR_DEVICE, usb_stor_ucr61s2b_init,
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index c40b9b8b1e7..702eb933cf8 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -554,7 +554,7 @@ config FB_VESA
554 554
555config FB_IMAC 555config FB_IMAC
556 bool "Intel-based Macintosh Framebuffer Support" 556 bool "Intel-based Macintosh Framebuffer Support"
557 depends on (FB = y) && X86 557 depends on (FB = y) && X86 && EFI
558 select FB_CFB_FILLRECT 558 select FB_CFB_FILLRECT
559 select FB_CFB_COPYAREA 559 select FB_CFB_COPYAREA
560 select FB_CFB_IMAGEBLIT 560 select FB_CFB_IMAGEBLIT
diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
index 3e827e04a2a..106d428b72c 100644
--- a/drivers/video/aty/aty128fb.c
+++ b/drivers/video/aty/aty128fb.c
@@ -1800,6 +1800,9 @@ static struct backlight_properties aty128_bl_data = {
1800 1800
1801static void aty128_bl_set_power(struct fb_info *info, int power) 1801static void aty128_bl_set_power(struct fb_info *info, int power)
1802{ 1802{
1803 if (info->bl_dev == NULL)
1804 return;
1805
1803 mutex_lock(&info->bl_mutex); 1806 mutex_lock(&info->bl_mutex);
1804 up(&info->bl_dev->sem); 1807 up(&info->bl_dev->sem);
1805 info->bl_dev->props->power = power; 1808 info->bl_dev->props->power = power;
@@ -1828,7 +1831,7 @@ static void aty128_bl_init(struct aty128fb_par *par)
1828 bd = backlight_device_register(name, par, &aty128_bl_data); 1831 bd = backlight_device_register(name, par, &aty128_bl_data);
1829 if (IS_ERR(bd)) { 1832 if (IS_ERR(bd)) {
1830 info->bl_dev = NULL; 1833 info->bl_dev = NULL;
1831 printk("aty128: Backlight registration failed\n"); 1834 printk(KERN_WARNING "aty128: Backlight registration failed\n");
1832 goto error; 1835 goto error;
1833 } 1836 }
1834 1837
diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c
index 053ff63365b..510e4ea296e 100644
--- a/drivers/video/aty/atyfb_base.c
+++ b/drivers/video/aty/atyfb_base.c
@@ -2199,6 +2199,9 @@ static struct backlight_properties aty_bl_data = {
2199 2199
2200static void aty_bl_set_power(struct fb_info *info, int power) 2200static void aty_bl_set_power(struct fb_info *info, int power)
2201{ 2201{
2202 if (info->bl_dev == NULL)
2203 return;
2204
2202 mutex_lock(&info->bl_mutex); 2205 mutex_lock(&info->bl_mutex);
2203 up(&info->bl_dev->sem); 2206 up(&info->bl_dev->sem);
2204 info->bl_dev->props->power = power; 2207 info->bl_dev->props->power = power;
@@ -2223,7 +2226,7 @@ static void aty_bl_init(struct atyfb_par *par)
2223 bd = backlight_device_register(name, par, &aty_bl_data); 2226 bd = backlight_device_register(name, par, &aty_bl_data);
2224 if (IS_ERR(bd)) { 2227 if (IS_ERR(bd)) {
2225 info->bl_dev = NULL; 2228 info->bl_dev = NULL;
2226 printk("aty: Backlight registration failed\n"); 2229 printk(KERN_WARNING "aty: Backlight registration failed\n");
2227 goto error; 2230 goto error;
2228 } 2231 }
2229 2232
diff --git a/drivers/video/imacfb.c b/drivers/video/imacfb.c
index ff233b84dec..18ea4a54910 100644
--- a/drivers/video/imacfb.c
+++ b/drivers/video/imacfb.c
@@ -18,6 +18,8 @@
18#include <linux/screen_info.h> 18#include <linux/screen_info.h>
19#include <linux/slab.h> 19#include <linux/slab.h>
20#include <linux/string.h> 20#include <linux/string.h>
21#include <linux/dmi.h>
22#include <linux/efi.h>
21 23
22#include <asm/io.h> 24#include <asm/io.h>
23 25
@@ -28,7 +30,7 @@ typedef enum _MAC_TYPE {
28 M_I20, 30 M_I20,
29 M_MINI, 31 M_MINI,
30 M_MACBOOK, 32 M_MACBOOK,
31 M_NEW 33 M_UNKNOWN
32} MAC_TYPE; 34} MAC_TYPE;
33 35
34/* --------------------------------------------------------------------- */ 36/* --------------------------------------------------------------------- */
@@ -52,10 +54,36 @@ static struct fb_fix_screeninfo imacfb_fix __initdata = {
52}; 54};
53 55
54static int inverse; 56static int inverse;
55static int model = M_NEW; 57static int model = M_UNKNOWN;
56static int manual_height; 58static int manual_height;
57static int manual_width; 59static int manual_width;
58 60
61static int set_system(struct dmi_system_id *id)
62{
63 printk(KERN_INFO "imacfb: %s detected - set system to %ld\n",
64 id->ident, (long)id->driver_data);
65
66 model = (long)id->driver_data;
67
68 return 0;
69}
70
71static struct dmi_system_id __initdata dmi_system_table[] = {
72 { set_system, "iMac4,1", {
73 DMI_MATCH(DMI_BIOS_VENDOR,"Apple Computer, Inc."),
74 DMI_MATCH(DMI_PRODUCT_NAME,"iMac4,1") }, (void*)M_I17},
75 { set_system, "MacBookPro1,1", {
76 DMI_MATCH(DMI_BIOS_VENDOR,"Apple Computer, Inc."),
77 DMI_MATCH(DMI_PRODUCT_NAME,"MacBookPro1,1") }, (void*)M_I17},
78 { set_system, "MacBook1,1", {
79 DMI_MATCH(DMI_BIOS_VENDOR,"Apple Computer, Inc."),
80 DMI_MATCH(DMI_PRODUCT_NAME,"MacBook1,1")}, (void *)M_MACBOOK},
81 { set_system, "Macmini1,1", {
82 DMI_MATCH(DMI_BIOS_VENDOR,"Apple Computer, Inc."),
83 DMI_MATCH(DMI_PRODUCT_NAME,"Macmini1,1")}, (void *)M_MINI},
84 {},
85};
86
59#define DEFAULT_FB_MEM 1024*1024*16 87#define DEFAULT_FB_MEM 1024*1024*16
60 88
61/* --------------------------------------------------------------------- */ 89/* --------------------------------------------------------------------- */
@@ -149,7 +177,6 @@ static int __init imacfb_probe(struct platform_device *dev)
149 screen_info.lfb_linelength = 1472 * 4; 177 screen_info.lfb_linelength = 1472 * 4;
150 screen_info.lfb_base = 0x80010000; 178 screen_info.lfb_base = 0x80010000;
151 break; 179 break;
152 case M_NEW:
153 case M_I20: 180 case M_I20:
154 screen_info.lfb_width = 1680; 181 screen_info.lfb_width = 1680;
155 screen_info.lfb_height = 1050; 182 screen_info.lfb_height = 1050;
@@ -207,6 +234,10 @@ static int __init imacfb_probe(struct platform_device *dev)
207 size_remap = size_total; 234 size_remap = size_total;
208 imacfb_fix.smem_len = size_remap; 235 imacfb_fix.smem_len = size_remap;
209 236
237#ifndef __i386__
238 screen_info.imacpm_seg = 0;
239#endif
240
210 if (!request_mem_region(imacfb_fix.smem_start, size_total, "imacfb")) { 241 if (!request_mem_region(imacfb_fix.smem_start, size_total, "imacfb")) {
211 printk(KERN_WARNING 242 printk(KERN_WARNING
212 "imacfb: cannot reserve video memory at 0x%lx\n", 243 "imacfb: cannot reserve video memory at 0x%lx\n",
@@ -324,8 +355,16 @@ static int __init imacfb_init(void)
324 int ret; 355 int ret;
325 char *option = NULL; 356 char *option = NULL;
326 357
327 /* ignore error return of fb_get_options */ 358 if (!efi_enabled)
328 fb_get_options("imacfb", &option); 359 return -ENODEV;
360 if (!dmi_check_system(dmi_system_table))
361 return -ENODEV;
362 if (model == M_UNKNOWN)
363 return -ENODEV;
364
365 if (fb_get_options("imacfb", &option))
366 return -ENODEV;
367
329 imacfb_setup(option); 368 imacfb_setup(option);
330 ret = platform_driver_register(&imacfb_driver); 369 ret = platform_driver_register(&imacfb_driver);
331 370
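
The imacfb changes gate the driver on EFI plus a DMI match: a table of Apple product names maps each model to a MAC_TYPE, and probing bails out with -ENODEV when nothing matches (M_UNKNOWN). The sketch below reproduces that table-driven detection in plain C; detect_model() is a userspace stand-in for dmi_check_system() and its set_system() callback.

#include <stdio.h>
#include <string.h>

enum mac_type { M_I17, M_I20, M_MINI, M_MACBOOK, M_UNKNOWN };

struct dmi_entry {
	const char *product;
	enum mac_type model;
};

static const struct dmi_entry dmi_table[] = {
	{ "iMac4,1",       M_I17 },
	{ "MacBookPro1,1", M_I17 },
	{ "MacBook1,1",    M_MACBOOK },
	{ "Macmini1,1",    M_MINI },
	{ NULL,            M_UNKNOWN },
};

static enum mac_type detect_model(const char *product_name)
{
	const struct dmi_entry *e;

	for (e = dmi_table; e->product; e++)
		if (strcmp(e->product, product_name) == 0)
			return e->model;
	return M_UNKNOWN;		/* driver returns -ENODEV here */
}

int main(void)
{
	printf("model id: %d\n", detect_model("MacBook1,1"));
	return 0;
}
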
diff --git a/drivers/video/matrox/g450_pll.c b/drivers/video/matrox/g450_pll.c
index 440272ad10e..7c76e079ca7 100644
--- a/drivers/video/matrox/g450_pll.c
+++ b/drivers/video/matrox/g450_pll.c
@@ -331,7 +331,15 @@ static int __g450_setclk(WPMINFO unsigned int fout, unsigned int pll,
331 tmp |= M1064_XPIXCLKCTRL_PLL_UP; 331 tmp |= M1064_XPIXCLKCTRL_PLL_UP;
332 } 332 }
333 matroxfb_DAC_out(PMINFO M1064_XPIXCLKCTRL, tmp); 333 matroxfb_DAC_out(PMINFO M1064_XPIXCLKCTRL, tmp);
334#ifdef __powerpc__
335 /* This is necessary to avoid jitter on PowerPC
336 * (OpenFirmware) systems, but apparently
337 * introduces jitter, at least on a x86-64
338 * using DVI.
339 * A simple workaround is disable for non-PPC.
340 */
334 matroxfb_DAC_out(PMINFO M1064_XDVICLKCTRL, 0); 341 matroxfb_DAC_out(PMINFO M1064_XDVICLKCTRL, 0);
342#endif /* __powerpc__ */
335 matroxfb_DAC_out(PMINFO M1064_XPWRCTRL, xpwrctrl); 343 matroxfb_DAC_out(PMINFO M1064_XPWRCTRL, xpwrctrl);
336 344
337 matroxfb_DAC_unlock_irqrestore(flags); 345 matroxfb_DAC_unlock_irqrestore(flags);
diff --git a/drivers/video/nvidia/nv_backlight.c b/drivers/video/nvidia/nv_backlight.c
index b45f577094a..14c37c42191 100644
--- a/drivers/video/nvidia/nv_backlight.c
+++ b/drivers/video/nvidia/nv_backlight.c
@@ -112,6 +112,8 @@ static struct backlight_properties nvidia_bl_data = {
112 112
113void nvidia_bl_set_power(struct fb_info *info, int power) 113void nvidia_bl_set_power(struct fb_info *info, int power)
114{ 114{
115 if (info->bl_dev == NULL)
116 return;
115 mutex_lock(&info->bl_mutex); 117 mutex_lock(&info->bl_mutex);
116 up(&info->bl_dev->sem); 118 up(&info->bl_dev->sem);
117 info->bl_dev->props->power = power; 119 info->bl_dev->props->power = power;
@@ -140,7 +142,7 @@ void nvidia_bl_init(struct nvidia_par *par)
140 bd = backlight_device_register(name, par, &nvidia_bl_data); 142 bd = backlight_device_register(name, par, &nvidia_bl_data);
141 if (IS_ERR(bd)) { 143 if (IS_ERR(bd)) {
142 info->bl_dev = NULL; 144 info->bl_dev = NULL;
143 printk("nvidia: Backlight registration failed\n"); 145 printk(KERN_WARNING "nvidia: Backlight registration failed\n");
144 goto error; 146 goto error;
145 } 147 }
146 148
diff --git a/drivers/video/riva/fbdev.c b/drivers/video/riva/fbdev.c
index 9823ba939e1..511c362d3b0 100644
--- a/drivers/video/riva/fbdev.c
+++ b/drivers/video/riva/fbdev.c
@@ -354,6 +354,9 @@ static struct backlight_properties riva_bl_data = {
354 354
355static void riva_bl_set_power(struct fb_info *info, int power) 355static void riva_bl_set_power(struct fb_info *info, int power)
356{ 356{
357 if (info->bl_dev == NULL)
358 return;
359
357 mutex_lock(&info->bl_mutex); 360 mutex_lock(&info->bl_mutex);
358 up(&info->bl_dev->sem); 361 up(&info->bl_dev->sem);
359 info->bl_dev->props->power = power; 362 info->bl_dev->props->power = power;
@@ -382,7 +385,7 @@ static void riva_bl_init(struct riva_par *par)
382 bd = backlight_device_register(name, par, &riva_bl_data); 385 bd = backlight_device_register(name, par, &riva_bl_data);
383 if (IS_ERR(bd)) { 386 if (IS_ERR(bd)) {
384 info->bl_dev = NULL; 387 info->bl_dev = NULL;
385 printk("riva: Backlight registration failed\n"); 388 printk(KERN_WARNING "riva: Backlight registration failed\n");
386 goto error; 389 goto error;
387 } 390 }
388 391
diff --git a/fs/adfs/super.c b/fs/adfs/super.c
index ba1c88af49f..82011019494 100644
--- a/fs/adfs/super.c
+++ b/fs/adfs/super.c
@@ -308,7 +308,7 @@ static struct adfs_discmap *adfs_read_map(struct super_block *sb, struct adfs_di
308 if (adfs_checkmap(sb, dm)) 308 if (adfs_checkmap(sb, dm))
309 return dm; 309 return dm;
310 310
311 adfs_error(sb, NULL, "map corrupted"); 311 adfs_error(sb, "map corrupted");
312 312
313error_free: 313error_free:
314 while (--zone >= 0) 314 while (--zone >= 0)
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 37534573960..045f98854f1 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -884,6 +884,61 @@ void bd_set_size(struct block_device *bdev, loff_t size)
884} 884}
885EXPORT_SYMBOL(bd_set_size); 885EXPORT_SYMBOL(bd_set_size);
886 886
887static int __blkdev_put(struct block_device *bdev, unsigned int subclass)
888{
889 int ret = 0;
890 struct inode *bd_inode = bdev->bd_inode;
891 struct gendisk *disk = bdev->bd_disk;
892
893 mutex_lock_nested(&bdev->bd_mutex, subclass);
894 lock_kernel();
895 if (!--bdev->bd_openers) {
896 sync_blockdev(bdev);
897 kill_bdev(bdev);
898 }
899 if (bdev->bd_contains == bdev) {
900 if (disk->fops->release)
901 ret = disk->fops->release(bd_inode, NULL);
902 } else {
903 mutex_lock_nested(&bdev->bd_contains->bd_mutex,
904 subclass + 1);
905 bdev->bd_contains->bd_part_count--;
906 mutex_unlock(&bdev->bd_contains->bd_mutex);
907 }
908 if (!bdev->bd_openers) {
909 struct module *owner = disk->fops->owner;
910
911 put_disk(disk);
912 module_put(owner);
913
914 if (bdev->bd_contains != bdev) {
915 kobject_put(&bdev->bd_part->kobj);
916 bdev->bd_part = NULL;
917 }
918 bdev->bd_disk = NULL;
919 bdev->bd_inode->i_data.backing_dev_info = &default_backing_dev_info;
920 if (bdev != bdev->bd_contains)
921 __blkdev_put(bdev->bd_contains, subclass + 1);
922 bdev->bd_contains = NULL;
923 }
924 unlock_kernel();
925 mutex_unlock(&bdev->bd_mutex);
926 bdput(bdev);
927 return ret;
928}
929
930int blkdev_put(struct block_device *bdev)
931{
932 return __blkdev_put(bdev, BD_MUTEX_NORMAL);
933}
934EXPORT_SYMBOL(blkdev_put);
935
936int blkdev_put_partition(struct block_device *bdev)
937{
938 return __blkdev_put(bdev, BD_MUTEX_PARTITION);
939}
940EXPORT_SYMBOL(blkdev_put_partition);
941
887static int 942static int
888blkdev_get_whole(struct block_device *bdev, mode_t mode, unsigned flags); 943blkdev_get_whole(struct block_device *bdev, mode_t mode, unsigned flags);
889 944
@@ -980,7 +1035,7 @@ out_first:
980 bdev->bd_disk = NULL; 1035 bdev->bd_disk = NULL;
981 bdev->bd_inode->i_data.backing_dev_info = &default_backing_dev_info; 1036 bdev->bd_inode->i_data.backing_dev_info = &default_backing_dev_info;
982 if (bdev != bdev->bd_contains) 1037 if (bdev != bdev->bd_contains)
983 blkdev_put(bdev->bd_contains); 1038 __blkdev_put(bdev->bd_contains, BD_MUTEX_WHOLE);
984 bdev->bd_contains = NULL; 1039 bdev->bd_contains = NULL;
985 put_disk(disk); 1040 put_disk(disk);
986 module_put(owner); 1041 module_put(owner);
@@ -1079,63 +1134,6 @@ static int blkdev_open(struct inode * inode, struct file * filp)
1079 return res; 1134 return res;
1080} 1135}
1081 1136
1082static int __blkdev_put(struct block_device *bdev, unsigned int subclass)
1083{
1084 int ret = 0;
1085 struct inode *bd_inode = bdev->bd_inode;
1086 struct gendisk *disk = bdev->bd_disk;
1087
1088 mutex_lock_nested(&bdev->bd_mutex, subclass);
1089 lock_kernel();
1090 if (!--bdev->bd_openers) {
1091 sync_blockdev(bdev);
1092 kill_bdev(bdev);
1093 }
1094 if (bdev->bd_contains == bdev) {
1095 if (disk->fops->release)
1096 ret = disk->fops->release(bd_inode, NULL);
1097 } else {
1098 mutex_lock_nested(&bdev->bd_contains->bd_mutex,
1099 subclass + 1);
1100 bdev->bd_contains->bd_part_count--;
1101 mutex_unlock(&bdev->bd_contains->bd_mutex);
1102 }
1103 if (!bdev->bd_openers) {
1104 struct module *owner = disk->fops->owner;
1105
1106 put_disk(disk);
1107 module_put(owner);
1108
1109 if (bdev->bd_contains != bdev) {
1110 kobject_put(&bdev->bd_part->kobj);
1111 bdev->bd_part = NULL;
1112 }
1113 bdev->bd_disk = NULL;
1114 bdev->bd_inode->i_data.backing_dev_info = &default_backing_dev_info;
1115 if (bdev != bdev->bd_contains)
1116 __blkdev_put(bdev->bd_contains, subclass + 1);
1117 bdev->bd_contains = NULL;
1118 }
1119 unlock_kernel();
1120 mutex_unlock(&bdev->bd_mutex);
1121 bdput(bdev);
1122 return ret;
1123}
1124
1125int blkdev_put(struct block_device *bdev)
1126{
1127 return __blkdev_put(bdev, BD_MUTEX_NORMAL);
1128}
1129
1130EXPORT_SYMBOL(blkdev_put);
1131
1132int blkdev_put_partition(struct block_device *bdev)
1133{
1134 return __blkdev_put(bdev, BD_MUTEX_PARTITION);
1135}
1136
1137EXPORT_SYMBOL(blkdev_put_partition);
1138
1139static int blkdev_close(struct inode * inode, struct file * filp) 1137static int blkdev_close(struct inode * inode, struct file * filp)
1140{ 1138{
1141 struct block_device *bdev = I_BDEV(filp->f_mapping->host); 1139 struct block_device *bdev = I_BDEV(filp->f_mapping->host);
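The block_dev.c change is mostly a code move: __blkdev_put() and its two exported wrappers are hoisted above their first caller so the whole-disk error path can release the containing device with an explicit BD_MUTEX_WHOLE subclass instead of going through blkdev_put(). The point of the subclass argument is lockdep: when a partition is dropped, the whole disk's bd_mutex is taken with subclass + 1 while the partition's bd_mutex is still held, and the distinct subclasses tell the lock validator this is deliberate nesting rather than recursion on one lock class (the BD_MUTEX_* constants themselves are defined outside this hunk). A hedged sketch of the pattern, with toy_bdev/toy_put as stand-ins for the real block_device code:

#include <linux/mutex.h>

struct toy_bdev {
	struct mutex bd_mutex;
	struct toy_bdev *bd_contains;    /* whole disk; points to itself for a disk */
	int bd_openers;
};

static void toy_put(struct toy_bdev *bdev, unsigned int subclass)
{
	/* each nesting level uses its own lockdep subclass */
	mutex_lock_nested(&bdev->bd_mutex, subclass);
	if (!--bdev->bd_openers && bdev->bd_contains != bdev)
		toy_put(bdev->bd_contains, subclass + 1);
	mutex_unlock(&bdev->bd_mutex);
}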
diff --git a/fs/cifs/CHANGES b/fs/cifs/CHANGES
index a61d17ed182..0feb3bd49cb 100644
--- a/fs/cifs/CHANGES
+++ b/fs/cifs/CHANGES
@@ -1,3 +1,13 @@
1Version 1.45
2------------
3Do not time out lockw calls when using posix extensions. Do not
4time out requests if server still responding reasonably fast
5on requests on other threads. Improve POSIX locking emulation,
6(lock cancel now works, and unlock of merged range works even
7to Windows servers now). Fix oops on mount to lanman servers
8(win9x, os/2 etc.) when null password. Do not send listxattr
9(SMB to query all EAs) if nouser_xattr specified.
10
1Version 1.44 11Version 1.44
2------------ 12------------
3Rewritten sessionsetup support, including support for legacy SMB 13Rewritten sessionsetup support, including support for legacy SMB
diff --git a/fs/cifs/README b/fs/cifs/README
index 7986d0d97ac..5f0e1bd64fe 100644
--- a/fs/cifs/README
+++ b/fs/cifs/README
@@ -408,7 +408,7 @@ A partial list of the supported mount options follows:
408 user_xattr Allow getting and setting user xattrs as OS/2 EAs (extended 408 user_xattr Allow getting and setting user xattrs as OS/2 EAs (extended
409 attributes) to the server (default) e.g. via setfattr 409 attributes) to the server (default) e.g. via setfattr
410 and getfattr utilities. 410 and getfattr utilities.
411 nouser_xattr Do not allow getfattr/setfattr to get/set xattrs 411 nouser_xattr Do not allow getfattr/setfattr to get/set/list xattrs
412 mapchars Translate six of the seven reserved characters (not backslash) 412 mapchars Translate six of the seven reserved characters (not backslash)
413 *?<>|: 413 *?<>|:
414 to the remap range (above 0xF000), which also 414 to the remap range (above 0xF000), which also
diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
index a89efaf78a2..4bc250b2d9f 100644
--- a/fs/cifs/cifsencrypt.c
+++ b/fs/cifs/cifsencrypt.c
@@ -277,7 +277,8 @@ void calc_lanman_hash(struct cifsSesInfo * ses, char * lnm_session_key)
277 return; 277 return;
278 278
279 memset(password_with_pad, 0, CIFS_ENCPWD_SIZE); 279 memset(password_with_pad, 0, CIFS_ENCPWD_SIZE);
280 strncpy(password_with_pad, ses->password, CIFS_ENCPWD_SIZE); 280 if(ses->password)
281 strncpy(password_with_pad, ses->password, CIFS_ENCPWD_SIZE);
281 282
282 if((ses->server->secMode & SECMODE_PW_ENCRYPT) == 0) 283 if((ses->server->secMode & SECMODE_PW_ENCRYPT) == 0)
283 if(extended_security & CIFSSEC_MAY_PLNTXT) { 284 if(extended_security & CIFSSEC_MAY_PLNTXT) {
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index c28ede59994..3cd750029be 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -402,7 +402,6 @@ static struct quotactl_ops cifs_quotactl_ops = {
402}; 402};
403#endif 403#endif
404 404
405#ifdef CONFIG_CIFS_EXPERIMENTAL
406static void cifs_umount_begin(struct vfsmount * vfsmnt, int flags) 405static void cifs_umount_begin(struct vfsmount * vfsmnt, int flags)
407{ 406{
408 struct cifs_sb_info *cifs_sb; 407 struct cifs_sb_info *cifs_sb;
@@ -422,7 +421,7 @@ static void cifs_umount_begin(struct vfsmount * vfsmnt, int flags)
422 tcon->tidStatus = CifsExiting; 421 tcon->tidStatus = CifsExiting;
423 up(&tcon->tconSem); 422 up(&tcon->tconSem);
424 423
425 /* cancel_brl_requests(tcon); */ 424 /* cancel_brl_requests(tcon); */ /* BB mark all brl mids as exiting */
426 /* cancel_notify_requests(tcon); */ 425 /* cancel_notify_requests(tcon); */
427 if(tcon->ses && tcon->ses->server) 426 if(tcon->ses && tcon->ses->server)
428 { 427 {
@@ -438,7 +437,6 @@ static void cifs_umount_begin(struct vfsmount * vfsmnt, int flags)
438 437
439 return; 438 return;
440} 439}
441#endif
442 440
443static int cifs_remount(struct super_block *sb, int *flags, char *data) 441static int cifs_remount(struct super_block *sb, int *flags, char *data)
444{ 442{
@@ -457,9 +455,7 @@ struct super_operations cifs_super_ops = {
457 unless later we add lazy close of inodes or unless the kernel forgets to call 455 unless later we add lazy close of inodes or unless the kernel forgets to call
458 us with the same number of releases (closes) as opens */ 456 us with the same number of releases (closes) as opens */
459 .show_options = cifs_show_options, 457 .show_options = cifs_show_options,
460#ifdef CONFIG_CIFS_EXPERIMENTAL
461 .umount_begin = cifs_umount_begin, 458 .umount_begin = cifs_umount_begin,
462#endif
463 .remount_fs = cifs_remount, 459 .remount_fs = cifs_remount,
464}; 460};
465 461
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index 8f75c6f2470..39ee8ef3bde 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -100,5 +100,5 @@ extern ssize_t cifs_getxattr(struct dentry *, const char *, void *, size_t);
100extern ssize_t cifs_listxattr(struct dentry *, char *, size_t); 100extern ssize_t cifs_listxattr(struct dentry *, char *, size_t);
101extern int cifs_ioctl (struct inode * inode, struct file * filep, 101extern int cifs_ioctl (struct inode * inode, struct file * filep,
102 unsigned int command, unsigned long arg); 102 unsigned int command, unsigned long arg);
103#define CIFS_VERSION "1.44" 103#define CIFS_VERSION "1.45"
104#endif /* _CIFSFS_H */ 104#endif /* _CIFSFS_H */
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index 6d7cf5f3bc0..b24006c47df 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -3,6 +3,7 @@
3 * 3 *
4 * Copyright (C) International Business Machines Corp., 2002,2006 4 * Copyright (C) International Business Machines Corp., 2002,2006
5 * Author(s): Steve French (sfrench@us.ibm.com) 5 * Author(s): Steve French (sfrench@us.ibm.com)
6 * Jeremy Allison (jra@samba.org)
6 * 7 *
7 * This library is free software; you can redistribute it and/or modify 8 * This library is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU Lesser General Public License as published 9 * it under the terms of the GNU Lesser General Public License as published
@@ -158,7 +159,8 @@ struct TCP_Server_Info {
158 /* 16th byte of RFC1001 workstation name is always null */ 159 /* 16th byte of RFC1001 workstation name is always null */
159 char workstation_RFC1001_name[SERVER_NAME_LEN_WITH_NULL]; 160 char workstation_RFC1001_name[SERVER_NAME_LEN_WITH_NULL];
160 __u32 sequence_number; /* needed for CIFS PDU signature */ 161 __u32 sequence_number; /* needed for CIFS PDU signature */
161 char mac_signing_key[CIFS_SESS_KEY_SIZE + 16]; 162 char mac_signing_key[CIFS_SESS_KEY_SIZE + 16];
163 unsigned long lstrp; /* when we got last response from this server */
162}; 164};
163 165
164/* 166/*
@@ -266,14 +268,14 @@ struct cifsTconInfo {
266}; 268};
267 269
268/* 270/*
269 * This info hangs off the cifsFileInfo structure. This is used to track 271 * This info hangs off the cifsFileInfo structure, pointed to by llist.
270 * byte stream locks on the file 272 * This is used to track byte stream locks on the file
271 */ 273 */
272struct cifsLockInfo { 274struct cifsLockInfo {
273 struct cifsLockInfo *next; 275 struct list_head llist; /* pointer to next cifsLockInfo */
274 int start; 276 __u64 offset;
275 int length; 277 __u64 length;
276 int type; 278 __u8 type;
277}; 279};
278 280
279/* 281/*
@@ -304,6 +306,8 @@ struct cifsFileInfo {
304 /* lock scope id (0 if none) */ 306 /* lock scope id (0 if none) */
305 struct file * pfile; /* needed for writepage */ 307 struct file * pfile; /* needed for writepage */
306 struct inode * pInode; /* needed for oplock break */ 308 struct inode * pInode; /* needed for oplock break */
309 struct semaphore lock_sem;
310 struct list_head llist; /* list of byte range locks we have. */
307 unsigned closePend:1; /* file is marked to close */ 311 unsigned closePend:1; /* file is marked to close */
308 unsigned invalidHandle:1; /* file closed via session abend */ 312 unsigned invalidHandle:1; /* file closed via session abend */
309 atomic_t wrtPending; /* handle in use - defer close */ 313 atomic_t wrtPending; /* handle in use - defer close */
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index a5ddc62d6fe..b35c55c3c8b 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -50,6 +50,10 @@ extern int SendReceive(const unsigned int /* xid */ , struct cifsSesInfo *,
50extern int SendReceive2(const unsigned int /* xid */ , struct cifsSesInfo *, 50extern int SendReceive2(const unsigned int /* xid */ , struct cifsSesInfo *,
51 struct kvec *, int /* nvec to send */, 51 struct kvec *, int /* nvec to send */,
52 int * /* type of buf returned */ , const int long_op); 52 int * /* type of buf returned */ , const int long_op);
53extern int SendReceiveBlockingLock(const unsigned int /* xid */ , struct cifsTconInfo *,
54 struct smb_hdr * /* input */ ,
55 struct smb_hdr * /* out */ ,
56 int * /* bytes returned */);
53extern int checkSMBhdr(struct smb_hdr *smb, __u16 mid); 57extern int checkSMBhdr(struct smb_hdr *smb, __u16 mid);
54extern int checkSMB(struct smb_hdr *smb, __u16 mid, int length); 58extern int checkSMB(struct smb_hdr *smb, __u16 mid, int length);
55extern int is_valid_oplock_break(struct smb_hdr *smb, struct TCP_Server_Info *); 59extern int is_valid_oplock_break(struct smb_hdr *smb, struct TCP_Server_Info *);
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 19678c575df..075d8fb3d37 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -477,7 +477,7 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses)
477 /* BB get server time for time conversions and add 477 /* BB get server time for time conversions and add
478 code to use it and timezone since this is not UTC */ 478 code to use it and timezone since this is not UTC */
479 479
480 if (rsp->EncryptionKeyLength == CIFS_CRYPTO_KEY_SIZE) { 480 if (rsp->EncryptionKeyLength == cpu_to_le16(CIFS_CRYPTO_KEY_SIZE)) {
481 memcpy(server->cryptKey, rsp->EncryptionKey, 481 memcpy(server->cryptKey, rsp->EncryptionKey,
482 CIFS_CRYPTO_KEY_SIZE); 482 CIFS_CRYPTO_KEY_SIZE);
483 } else if (server->secMode & SECMODE_PW_ENCRYPT) { 483 } else if (server->secMode & SECMODE_PW_ENCRYPT) {
@@ -1460,8 +1460,13 @@ CIFSSMBLock(const int xid, struct cifsTconInfo *tcon,
1460 pSMB->hdr.smb_buf_length += count; 1460 pSMB->hdr.smb_buf_length += count;
1461 pSMB->ByteCount = cpu_to_le16(count); 1461 pSMB->ByteCount = cpu_to_le16(count);
1462 1462
1463 rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, 1463 if (waitFlag) {
1464 rc = SendReceiveBlockingLock(xid, tcon, (struct smb_hdr *) pSMB,
1465 (struct smb_hdr *) pSMBr, &bytes_returned);
1466 } else {
1467 rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
1464 (struct smb_hdr *) pSMBr, &bytes_returned, timeout); 1468 (struct smb_hdr *) pSMBr, &bytes_returned, timeout);
1469 }
1465 cifs_stats_inc(&tcon->num_locks); 1470 cifs_stats_inc(&tcon->num_locks);
1466 if (rc) { 1471 if (rc) {
1467 cFYI(1, ("Send error in Lock = %d", rc)); 1472 cFYI(1, ("Send error in Lock = %d", rc));
@@ -1484,6 +1489,7 @@ CIFSSMBPosixLock(const int xid, struct cifsTconInfo *tcon,
1484 char *data_offset; 1489 char *data_offset;
1485 struct cifs_posix_lock *parm_data; 1490 struct cifs_posix_lock *parm_data;
1486 int rc = 0; 1491 int rc = 0;
1492 int timeout = 0;
1487 int bytes_returned = 0; 1493 int bytes_returned = 0;
1488 __u16 params, param_offset, offset, byte_count, count; 1494 __u16 params, param_offset, offset, byte_count, count;
1489 1495
@@ -1503,7 +1509,6 @@ CIFSSMBPosixLock(const int xid, struct cifsTconInfo *tcon,
1503 pSMB->MaxSetupCount = 0; 1509 pSMB->MaxSetupCount = 0;
1504 pSMB->Reserved = 0; 1510 pSMB->Reserved = 0;
1505 pSMB->Flags = 0; 1511 pSMB->Flags = 0;
1506 pSMB->Timeout = 0;
1507 pSMB->Reserved2 = 0; 1512 pSMB->Reserved2 = 0;
1508 param_offset = offsetof(struct smb_com_transaction2_sfi_req, Fid) - 4; 1513 param_offset = offsetof(struct smb_com_transaction2_sfi_req, Fid) - 4;
1509 offset = param_offset + params; 1514 offset = param_offset + params;
@@ -1529,8 +1534,13 @@ CIFSSMBPosixLock(const int xid, struct cifsTconInfo *tcon,
1529 (((char *) &pSMB->hdr.Protocol) + offset); 1534 (((char *) &pSMB->hdr.Protocol) + offset);
1530 1535
1531 parm_data->lock_type = cpu_to_le16(lock_type); 1536 parm_data->lock_type = cpu_to_le16(lock_type);
1532 if(waitFlag) 1537 if(waitFlag) {
1538 timeout = 3; /* blocking operation, no timeout */
1533 parm_data->lock_flags = cpu_to_le16(1); 1539 parm_data->lock_flags = cpu_to_le16(1);
1540 pSMB->Timeout = cpu_to_le32(-1);
1541 } else
1542 pSMB->Timeout = 0;
1543
1534 parm_data->pid = cpu_to_le32(current->tgid); 1544 parm_data->pid = cpu_to_le32(current->tgid);
1535 parm_data->start = cpu_to_le64(pLockData->fl_start); 1545 parm_data->start = cpu_to_le64(pLockData->fl_start);
1536 parm_data->length = cpu_to_le64(len); /* normalize negative numbers */ 1546 parm_data->length = cpu_to_le64(len); /* normalize negative numbers */
@@ -1541,8 +1551,14 @@ CIFSSMBPosixLock(const int xid, struct cifsTconInfo *tcon,
1541 pSMB->Reserved4 = 0; 1551 pSMB->Reserved4 = 0;
1542 pSMB->hdr.smb_buf_length += byte_count; 1552 pSMB->hdr.smb_buf_length += byte_count;
1543 pSMB->ByteCount = cpu_to_le16(byte_count); 1553 pSMB->ByteCount = cpu_to_le16(byte_count);
1544 rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, 1554 if (waitFlag) {
1545 (struct smb_hdr *) pSMBr, &bytes_returned, 0); 1555 rc = SendReceiveBlockingLock(xid, tcon, (struct smb_hdr *) pSMB,
1556 (struct smb_hdr *) pSMBr, &bytes_returned);
1557 } else {
1558 rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
1559 (struct smb_hdr *) pSMBr, &bytes_returned, timeout);
1560 }
1561
1546 if (rc) { 1562 if (rc) {
1547 cFYI(1, ("Send error in Posix Lock = %d", rc)); 1563 cFYI(1, ("Send error in Posix Lock = %d", rc));
1548 } else if (get_flag) { 1564 } else if (get_flag) {
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 876eb9ef85f..5d394c72686 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -182,6 +182,7 @@ cifs_reconnect(struct TCP_Server_Info *server)
182 182
183 while ((server->tcpStatus != CifsExiting) && (server->tcpStatus != CifsGood)) 183 while ((server->tcpStatus != CifsExiting) && (server->tcpStatus != CifsGood))
184 { 184 {
185 try_to_freeze();
185 if(server->protocolType == IPV6) { 186 if(server->protocolType == IPV6) {
186 rc = ipv6_connect(&server->addr.sockAddr6,&server->ssocket); 187 rc = ipv6_connect(&server->addr.sockAddr6,&server->ssocket);
187 } else { 188 } else {
@@ -612,6 +613,10 @@ multi_t2_fnd:
612#ifdef CONFIG_CIFS_STATS2 613#ifdef CONFIG_CIFS_STATS2
613 mid_entry->when_received = jiffies; 614 mid_entry->when_received = jiffies;
614#endif 615#endif
616 /* so we do not time out requests to server
617 which is still responding (since server could
618 be busy but not dead) */
619 server->lstrp = jiffies;
615 break; 620 break;
616 } 621 }
617 } 622 }
@@ -1266,33 +1271,35 @@ find_unc(__be32 new_target_ip_addr, char *uncName, char *userName)
1266 1271
1267 read_lock(&GlobalSMBSeslock); 1272 read_lock(&GlobalSMBSeslock);
1268 list_for_each(tmp, &GlobalTreeConnectionList) { 1273 list_for_each(tmp, &GlobalTreeConnectionList) {
1269 cFYI(1, ("Next tcon - ")); 1274 cFYI(1, ("Next tcon"));
1270 tcon = list_entry(tmp, struct cifsTconInfo, cifsConnectionList); 1275 tcon = list_entry(tmp, struct cifsTconInfo, cifsConnectionList);
1271 if (tcon->ses) { 1276 if (tcon->ses) {
1272 if (tcon->ses->server) { 1277 if (tcon->ses->server) {
1273 cFYI(1, 1278 cFYI(1,
1274 (" old ip addr: %x == new ip %x ?", 1279 ("old ip addr: %x == new ip %x ?",
1275 tcon->ses->server->addr.sockAddr.sin_addr. 1280 tcon->ses->server->addr.sockAddr.sin_addr.
1276 s_addr, new_target_ip_addr)); 1281 s_addr, new_target_ip_addr));
1277 if (tcon->ses->server->addr.sockAddr.sin_addr. 1282 if (tcon->ses->server->addr.sockAddr.sin_addr.
1278 s_addr == new_target_ip_addr) { 1283 s_addr == new_target_ip_addr) {
1279 /* BB lock tcon and server and tcp session and increment use count here? */ 1284 /* BB lock tcon, server and tcp session and increment use count here? */
1280 /* found a match on the TCP session */ 1285 /* found a match on the TCP session */
1281 /* BB check if reconnection needed */ 1286 /* BB check if reconnection needed */
1282 cFYI(1,("Matched ip, old UNC: %s == new: %s ?", 1287 cFYI(1,("IP match, old UNC: %s new: %s",
1283 tcon->treeName, uncName)); 1288 tcon->treeName, uncName));
1284 if (strncmp 1289 if (strncmp
1285 (tcon->treeName, uncName, 1290 (tcon->treeName, uncName,
1286 MAX_TREE_SIZE) == 0) { 1291 MAX_TREE_SIZE) == 0) {
1287 cFYI(1, 1292 cFYI(1,
1288 ("Matched UNC, old user: %s == new: %s ?", 1293 ("and old usr: %s new: %s",
1289 tcon->treeName, uncName)); 1294 tcon->treeName, uncName));
1290 if (strncmp 1295 if (strncmp
1291 (tcon->ses->userName, 1296 (tcon->ses->userName,
1292 userName, 1297 userName,
1293 MAX_USERNAME_SIZE) == 0) { 1298 MAX_USERNAME_SIZE) == 0) {
1294 read_unlock(&GlobalSMBSeslock); 1299 read_unlock(&GlobalSMBSeslock);
1295 return tcon;/* also matched user (smb session)*/ 1300 /* matched smb session
1301 (user name */
1302 return tcon;
1296 } 1303 }
1297 } 1304 }
1298 } 1305 }
@@ -1969,7 +1976,18 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb,
1969 } 1976 }
1970 1977
1971 cFYI(1,("Negotiate caps 0x%x",(int)cap)); 1978 cFYI(1,("Negotiate caps 0x%x",(int)cap));
1972 1979#ifdef CONFIG_CIFS_DEBUG2
1980 if(cap & CIFS_UNIX_FCNTL_CAP)
1981 cFYI(1,("FCNTL cap"));
1982 if(cap & CIFS_UNIX_EXTATTR_CAP)
1983 cFYI(1,("EXTATTR cap"));
1984 if(cap & CIFS_UNIX_POSIX_PATHNAMES_CAP)
1985 cFYI(1,("POSIX path cap"));
1986 if(cap & CIFS_UNIX_XATTR_CAP)
1987 cFYI(1,("XATTR cap"));
1988 if(cap & CIFS_UNIX_POSIX_ACL_CAP)
1989 cFYI(1,("POSIX ACL cap"));
1990#endif /* CIFS_DEBUG2 */
1973 if (CIFSSMBSetFSUnixInfo(xid, tcon, cap)) { 1991 if (CIFSSMBSetFSUnixInfo(xid, tcon, cap)) {
1974 cFYI(1,("setting capabilities failed")); 1992 cFYI(1,("setting capabilities failed"));
1975 } 1993 }
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index ba4cbe9b068..914239d5363 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -267,6 +267,10 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode,
267 pCifsFile->invalidHandle = FALSE; 267 pCifsFile->invalidHandle = FALSE;
268 pCifsFile->closePend = FALSE; 268 pCifsFile->closePend = FALSE;
269 init_MUTEX(&pCifsFile->fh_sem); 269 init_MUTEX(&pCifsFile->fh_sem);
270 init_MUTEX(&pCifsFile->lock_sem);
271 INIT_LIST_HEAD(&pCifsFile->llist);
272 atomic_set(&pCifsFile->wrtPending,0);
273
270 /* set the following in open now 274 /* set the following in open now
271 pCifsFile->pfile = file; */ 275 pCifsFile->pfile = file; */
272 write_lock(&GlobalSMBSeslock); 276 write_lock(&GlobalSMBSeslock);
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 944d2b9e092..e9c5ba9084f 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -5,6 +5,7 @@
5 * 5 *
6 * Copyright (C) International Business Machines Corp., 2002,2003 6 * Copyright (C) International Business Machines Corp., 2002,2003
7 * Author(s): Steve French (sfrench@us.ibm.com) 7 * Author(s): Steve French (sfrench@us.ibm.com)
8 * Jeremy Allison (jra@samba.org)
8 * 9 *
9 * This library is free software; you can redistribute it and/or modify 10 * This library is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU Lesser General Public License as published 11 * it under the terms of the GNU Lesser General Public License as published
@@ -47,6 +48,8 @@ static inline struct cifsFileInfo *cifs_init_private(
47 private_data->netfid = netfid; 48 private_data->netfid = netfid;
48 private_data->pid = current->tgid; 49 private_data->pid = current->tgid;
49 init_MUTEX(&private_data->fh_sem); 50 init_MUTEX(&private_data->fh_sem);
51 init_MUTEX(&private_data->lock_sem);
52 INIT_LIST_HEAD(&private_data->llist);
50 private_data->pfile = file; /* needed for writepage */ 53 private_data->pfile = file; /* needed for writepage */
51 private_data->pInode = inode; 54 private_data->pInode = inode;
52 private_data->invalidHandle = FALSE; 55 private_data->invalidHandle = FALSE;
@@ -473,6 +476,8 @@ int cifs_close(struct inode *inode, struct file *file)
473 cifs_sb = CIFS_SB(inode->i_sb); 476 cifs_sb = CIFS_SB(inode->i_sb);
474 pTcon = cifs_sb->tcon; 477 pTcon = cifs_sb->tcon;
475 if (pSMBFile) { 478 if (pSMBFile) {
479 struct cifsLockInfo *li, *tmp;
480
476 pSMBFile->closePend = TRUE; 481 pSMBFile->closePend = TRUE;
477 if (pTcon) { 482 if (pTcon) {
478 /* no sense reconnecting to close a file that is 483 /* no sense reconnecting to close a file that is
@@ -496,6 +501,16 @@ int cifs_close(struct inode *inode, struct file *file)
496 pSMBFile->netfid); 501 pSMBFile->netfid);
497 } 502 }
498 } 503 }
504
505 /* Delete any outstanding lock records.
506 We'll lose them when the file is closed anyway. */
507 down(&pSMBFile->lock_sem);
508 list_for_each_entry_safe(li, tmp, &pSMBFile->llist, llist) {
509 list_del(&li->llist);
510 kfree(li);
511 }
512 up(&pSMBFile->lock_sem);
513
499 write_lock(&GlobalSMBSeslock); 514 write_lock(&GlobalSMBSeslock);
500 list_del(&pSMBFile->flist); 515 list_del(&pSMBFile->flist);
501 list_del(&pSMBFile->tlist); 516 list_del(&pSMBFile->tlist);
@@ -570,6 +585,21 @@ int cifs_closedir(struct inode *inode, struct file *file)
570 return rc; 585 return rc;
571} 586}
572 587
588static int store_file_lock(struct cifsFileInfo *fid, __u64 len,
589 __u64 offset, __u8 lockType)
590{
591 struct cifsLockInfo *li = kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
592 if (li == NULL)
593 return -ENOMEM;
594 li->offset = offset;
595 li->length = len;
596 li->type = lockType;
597 down(&fid->lock_sem);
598 list_add(&li->llist, &fid->llist);
599 up(&fid->lock_sem);
600 return 0;
601}
602
573int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock) 603int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
574{ 604{
575 int rc, xid; 605 int rc, xid;
@@ -581,6 +611,7 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
581 struct cifsTconInfo *pTcon; 611 struct cifsTconInfo *pTcon;
582 __u16 netfid; 612 __u16 netfid;
583 __u8 lockType = LOCKING_ANDX_LARGE_FILES; 613 __u8 lockType = LOCKING_ANDX_LARGE_FILES;
614 int posix_locking;
584 615
585 length = 1 + pfLock->fl_end - pfLock->fl_start; 616 length = 1 + pfLock->fl_end - pfLock->fl_start;
586 rc = -EACCES; 617 rc = -EACCES;
@@ -639,15 +670,14 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
639 } 670 }
640 netfid = ((struct cifsFileInfo *)file->private_data)->netfid; 671 netfid = ((struct cifsFileInfo *)file->private_data)->netfid;
641 672
673 posix_locking = (cifs_sb->tcon->ses->capabilities & CAP_UNIX) &&
674 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(cifs_sb->tcon->fsUnixInfo.Capability));
642 675
643 /* BB add code here to normalize offset and length to 676 /* BB add code here to normalize offset and length to
644 account for negative length which we can not accept over the 677 account for negative length which we can not accept over the
645 wire */ 678 wire */
646 if (IS_GETLK(cmd)) { 679 if (IS_GETLK(cmd)) {
647 if(experimEnabled && 680 if(posix_locking) {
648 (cifs_sb->tcon->ses->capabilities & CAP_UNIX) &&
649 (CIFS_UNIX_FCNTL_CAP &
650 le64_to_cpu(cifs_sb->tcon->fsUnixInfo.Capability))) {
651 int posix_lock_type; 681 int posix_lock_type;
652 if(lockType & LOCKING_ANDX_SHARED_LOCK) 682 if(lockType & LOCKING_ANDX_SHARED_LOCK)
653 posix_lock_type = CIFS_RDLCK; 683 posix_lock_type = CIFS_RDLCK;
@@ -683,10 +713,15 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
683 FreeXid(xid); 713 FreeXid(xid);
684 return rc; 714 return rc;
685 } 715 }
686 if (experimEnabled && 716
687 (cifs_sb->tcon->ses->capabilities & CAP_UNIX) && 717 if (!numLock && !numUnlock) {
688 (CIFS_UNIX_FCNTL_CAP & 718 /* if no lock or unlock then nothing
689 le64_to_cpu(cifs_sb->tcon->fsUnixInfo.Capability))) { 719 to do since we do not know what it is */
720 FreeXid(xid);
721 return -EOPNOTSUPP;
722 }
723
724 if (posix_locking) {
690 int posix_lock_type; 725 int posix_lock_type;
691 if(lockType & LOCKING_ANDX_SHARED_LOCK) 726 if(lockType & LOCKING_ANDX_SHARED_LOCK)
692 posix_lock_type = CIFS_RDLCK; 727 posix_lock_type = CIFS_RDLCK;
@@ -695,18 +730,46 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
695 730
696 if(numUnlock == 1) 731 if(numUnlock == 1)
697 posix_lock_type = CIFS_UNLCK; 732 posix_lock_type = CIFS_UNLCK;
698 else if(numLock == 0) { 733
699 /* if no lock or unlock then nothing
700 to do since we do not know what it is */
701 FreeXid(xid);
702 return -EOPNOTSUPP;
703 }
704 rc = CIFSSMBPosixLock(xid, pTcon, netfid, 0 /* set */, 734 rc = CIFSSMBPosixLock(xid, pTcon, netfid, 0 /* set */,
705 length, pfLock, 735 length, pfLock,
706 posix_lock_type, wait_flag); 736 posix_lock_type, wait_flag);
707 } else 737 } else {
708 rc = CIFSSMBLock(xid, pTcon, netfid, length, pfLock->fl_start, 738 struct cifsFileInfo *fid = (struct cifsFileInfo *)file->private_data;
709 numUnlock, numLock, lockType, wait_flag); 739
740 if (numLock) {
741 rc = CIFSSMBLock(xid, pTcon, netfid, length, pfLock->fl_start,
742 0, numLock, lockType, wait_flag);
743
744 if (rc == 0) {
745 /* For Windows locks we must store them. */
746 rc = store_file_lock(fid, length,
747 pfLock->fl_start, lockType);
748 }
749 } else if (numUnlock) {
750 /* For each stored lock that this unlock overlaps
751 completely, unlock it. */
752 int stored_rc = 0;
753 struct cifsLockInfo *li, *tmp;
754
755 down(&fid->lock_sem);
756 list_for_each_entry_safe(li, tmp, &fid->llist, llist) {
757 if (pfLock->fl_start <= li->offset &&
758 length >= li->length) {
759 stored_rc = CIFSSMBLock(xid, pTcon, netfid,
760 li->length, li->offset,
761 1, 0, li->type, FALSE);
762 if (stored_rc)
763 rc = stored_rc;
764
765 list_del(&li->llist);
766 kfree(li);
767 }
768 }
769 up(&fid->lock_sem);
770 }
771 }
772
710 if (pfLock->fl_flags & FL_POSIX) 773 if (pfLock->fl_flags & FL_POSIX)
711 posix_lock_file_wait(file, pfLock); 774 posix_lock_file_wait(file, pfLock);
712 FreeXid(xid); 775 FreeXid(xid);
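Taken together, the cifsglob.h, dir.c and file.c hunks convert the per-open-file lock records from a hand-rolled singly linked list into a list_head chain (llist) guarded by the new lock_sem semaphore: cifs_lock() records every Windows-style byte-range lock it sets via store_file_lock(), an unlock walks the list and releases each record it fully covers, and cifs_close() frees whatever remains. A trimmed sketch of that bookkeeping, using toy_lock/toy_file/toy_store_lock/toy_drop_all_locks as stand-ins for the cifs structures (lock_sem and llist would be initialised at open time, as the dir.c and file.c hunks do):

#include <linux/errno.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <asm/semaphore.h>

struct toy_lock {                        /* stand-in for cifsLockInfo */
	struct list_head llist;
	__u64 offset;
	__u64 length;
};

struct toy_file {                        /* stand-in for cifsFileInfo */
	struct semaphore lock_sem;
	struct list_head llist;          /* byte-range locks held on this handle */
};

static int toy_store_lock(struct toy_file *f, __u64 offset, __u64 length)
{
	struct toy_lock *li = kmalloc(sizeof(*li), GFP_KERNEL);

	if (li == NULL)
		return -ENOMEM;
	li->offset = offset;
	li->length = length;
	down(&f->lock_sem);
	list_add(&li->llist, &f->llist);
	up(&f->lock_sem);
	return 0;
}

static void toy_drop_all_locks(struct toy_file *f)
{
	struct toy_lock *li, *tmp;

	/* _safe variant because entries are deleted while walking */
	down(&f->lock_sem);
	list_for_each_entry_safe(li, tmp, &f->llist, llist) {
		list_del(&li->llist);
		kfree(li);
	}
	up(&f->lock_sem);
}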
diff --git a/fs/cifs/netmisc.c b/fs/cifs/netmisc.c
index b66eff5dc62..ce87550e918 100644
--- a/fs/cifs/netmisc.c
+++ b/fs/cifs/netmisc.c
@@ -72,6 +72,7 @@ static const struct smb_to_posix_error mapping_table_ERRDOS[] = {
72 {ERRinvlevel,-EOPNOTSUPP}, 72 {ERRinvlevel,-EOPNOTSUPP},
73 {ERRdirnotempty, -ENOTEMPTY}, 73 {ERRdirnotempty, -ENOTEMPTY},
74 {ERRnotlocked, -ENOLCK}, 74 {ERRnotlocked, -ENOLCK},
75 {ERRcancelviolation, -ENOLCK},
75 {ERRalreadyexists, -EEXIST}, 76 {ERRalreadyexists, -EEXIST},
76 {ERRmoredata, -EOVERFLOW}, 77 {ERRmoredata, -EOVERFLOW},
77 {ERReasnotsupported,-EOPNOTSUPP}, 78 {ERReasnotsupported,-EOPNOTSUPP},
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index 03bbcb37791..105761e3ba0 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -556,7 +556,7 @@ static int cifs_entry_is_dot(char *current_entry, struct cifsFileInfo *cfile)
556 FIND_FILE_STANDARD_INFO * pFindData = 556 FIND_FILE_STANDARD_INFO * pFindData =
557 (FIND_FILE_STANDARD_INFO *)current_entry; 557 (FIND_FILE_STANDARD_INFO *)current_entry;
558 filename = &pFindData->FileName[0]; 558 filename = &pFindData->FileName[0];
559 len = le32_to_cpu(pFindData->FileNameLength); 559 len = pFindData->FileNameLength;
560 } else { 560 } else {
561 cFYI(1,("Unknown findfirst level %d",cfile->srch_inf.info_level)); 561 cFYI(1,("Unknown findfirst level %d",cfile->srch_inf.info_level));
562 } 562 }
diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
index 7202d534ef0..d1705ab8136 100644
--- a/fs/cifs/sess.c
+++ b/fs/cifs/sess.c
@@ -372,7 +372,7 @@ CIFS_SessSetup(unsigned int xid, struct cifsSesInfo *ses, int first_time,
372 372
373 /* no capabilities flags in old lanman negotiation */ 373 /* no capabilities flags in old lanman negotiation */
374 374
375 pSMB->old_req.PasswordLength = CIFS_SESS_KEY_SIZE; 375 pSMB->old_req.PasswordLength = cpu_to_le16(CIFS_SESS_KEY_SIZE);
376 /* BB calculate hash with password */ 376 /* BB calculate hash with password */
377 /* and copy into bcc */ 377 /* and copy into bcc */
378 378
diff --git a/fs/cifs/smberr.h b/fs/cifs/smberr.h
index cd41c67ff8d..212c3c29640 100644
--- a/fs/cifs/smberr.h
+++ b/fs/cifs/smberr.h
@@ -95,6 +95,7 @@
95#define ERRinvlevel 124 95#define ERRinvlevel 124
96#define ERRdirnotempty 145 96#define ERRdirnotempty 145
97#define ERRnotlocked 158 97#define ERRnotlocked 158
98#define ERRcancelviolation 173
98#define ERRalreadyexists 183 99#define ERRalreadyexists 183
99#define ERRbadpipe 230 100#define ERRbadpipe 230
100#define ERRpipebusy 231 101#define ERRpipebusy 231
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index 17ba329e2b3..48d47b46b1f 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -3,7 +3,8 @@
3 * 3 *
4 * Copyright (C) International Business Machines Corp., 2002,2005 4 * Copyright (C) International Business Machines Corp., 2002,2005
5 * Author(s): Steve French (sfrench@us.ibm.com) 5 * Author(s): Steve French (sfrench@us.ibm.com)
6 * 6 * Jeremy Allison (jra@samba.org) 2006.
7 *
7 * This library is free software; you can redistribute it and/or modify 8 * This library is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU Lesser General Public License as published 9 * it under the terms of the GNU Lesser General Public License as published
9 * by the Free Software Foundation; either version 2.1 of the License, or 10 * by the Free Software Foundation; either version 2.1 of the License, or
@@ -36,7 +37,7 @@ extern mempool_t *cifs_mid_poolp;
36extern kmem_cache_t *cifs_oplock_cachep; 37extern kmem_cache_t *cifs_oplock_cachep;
37 38
38static struct mid_q_entry * 39static struct mid_q_entry *
39AllocMidQEntry(struct smb_hdr *smb_buffer, struct cifsSesInfo *ses) 40AllocMidQEntry(const struct smb_hdr *smb_buffer, struct cifsSesInfo *ses)
40{ 41{
41 struct mid_q_entry *temp; 42 struct mid_q_entry *temp;
42 43
@@ -203,6 +204,10 @@ smb_send(struct socket *ssocket, struct smb_hdr *smb_buffer,
203 rc = 0; 204 rc = 0;
204 } 205 }
205 206
207 /* Don't want to modify the buffer as a
208 side effect of this call. */
209 smb_buffer->smb_buf_length = smb_buf_length;
210
206 return rc; 211 return rc;
207} 212}
208 213
@@ -217,6 +222,7 @@ smb_send2(struct socket *ssocket, struct kvec *iov, int n_vec,
217 unsigned int len = iov[0].iov_len; 222 unsigned int len = iov[0].iov_len;
218 unsigned int total_len; 223 unsigned int total_len;
219 int first_vec = 0; 224 int first_vec = 0;
225 unsigned int smb_buf_length = smb_buffer->smb_buf_length;
220 226
221 if(ssocket == NULL) 227 if(ssocket == NULL)
222 return -ENOTSOCK; /* BB eventually add reconnect code here */ 228 return -ENOTSOCK; /* BB eventually add reconnect code here */
@@ -293,36 +299,15 @@ smb_send2(struct socket *ssocket, struct kvec *iov, int n_vec,
293 } else 299 } else
294 rc = 0; 300 rc = 0;
295 301
302 /* Don't want to modify the buffer as a
303 side effect of this call. */
304 smb_buffer->smb_buf_length = smb_buf_length;
305
296 return rc; 306 return rc;
297} 307}
298 308
299int 309static int wait_for_free_request(struct cifsSesInfo *ses, const int long_op)
300SendReceive2(const unsigned int xid, struct cifsSesInfo *ses,
301 struct kvec *iov, int n_vec, int * pRespBufType /* ret */,
302 const int long_op)
303{ 310{
304 int rc = 0;
305 unsigned int receive_len;
306 unsigned long timeout;
307 struct mid_q_entry *midQ;
308 struct smb_hdr *in_buf = iov[0].iov_base;
309
310 *pRespBufType = CIFS_NO_BUFFER; /* no response buf yet */
311
312 if ((ses == NULL) || (ses->server == NULL)) {
313 cifs_small_buf_release(in_buf);
314 cERROR(1,("Null session"));
315 return -EIO;
316 }
317
318 if(ses->server->tcpStatus == CifsExiting) {
319 cifs_small_buf_release(in_buf);
320 return -ENOENT;
321 }
322
323 /* Ensure that we do not send more than 50 overlapping requests
324 to the same server. We may make this configurable later or
325 use ses->maxReq */
326 if(long_op == -1) { 311 if(long_op == -1) {
327 /* oplock breaks must not be held up */ 312 /* oplock breaks must not be held up */
328 atomic_inc(&ses->server->inFlight); 313 atomic_inc(&ses->server->inFlight);
@@ -345,53 +330,140 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses,
345 } else { 330 } else {
346 if(ses->server->tcpStatus == CifsExiting) { 331 if(ses->server->tcpStatus == CifsExiting) {
347 spin_unlock(&GlobalMid_Lock); 332 spin_unlock(&GlobalMid_Lock);
348 cifs_small_buf_release(in_buf);
349 return -ENOENT; 333 return -ENOENT;
350 } 334 }
351 335
352 /* can not count locking commands against total since 336 /* can not count locking commands against total since
353 they are allowed to block on server */ 337 they are allowed to block on server */
354 338
355 if(long_op < 3) {
356 /* update # of requests on the wire to server */ 339 /* update # of requests on the wire to server */
340 if (long_op < 3)
357 atomic_inc(&ses->server->inFlight); 341 atomic_inc(&ses->server->inFlight);
358 }
359 spin_unlock(&GlobalMid_Lock); 342 spin_unlock(&GlobalMid_Lock);
360 break; 343 break;
361 } 344 }
362 } 345 }
363 } 346 }
364 /* make sure that we sign in the same order that we send on this socket 347 return 0;
365 and avoid races inside tcp sendmsg code that could cause corruption 348}
366 of smb data */
367
368 down(&ses->server->tcpSem);
369 349
350static int allocate_mid(struct cifsSesInfo *ses, struct smb_hdr *in_buf,
351 struct mid_q_entry **ppmidQ)
352{
370 if (ses->server->tcpStatus == CifsExiting) { 353 if (ses->server->tcpStatus == CifsExiting) {
371 rc = -ENOENT; 354 return -ENOENT;
372 goto out_unlock2;
373 } else if (ses->server->tcpStatus == CifsNeedReconnect) { 355 } else if (ses->server->tcpStatus == CifsNeedReconnect) {
374 cFYI(1,("tcp session dead - return to caller to retry")); 356 cFYI(1,("tcp session dead - return to caller to retry"));
375 rc = -EAGAIN; 357 return -EAGAIN;
376 goto out_unlock2;
377 } else if (ses->status != CifsGood) { 358 } else if (ses->status != CifsGood) {
378 /* check if SMB session is bad because we are setting it up */ 359 /* check if SMB session is bad because we are setting it up */
379 if((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) && 360 if((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
380 (in_buf->Command != SMB_COM_NEGOTIATE)) { 361 (in_buf->Command != SMB_COM_NEGOTIATE)) {
381 rc = -EAGAIN; 362 return -EAGAIN;
382 goto out_unlock2;
383 } /* else ok - we are setting up session */ 363 } /* else ok - we are setting up session */
384 } 364 }
385 midQ = AllocMidQEntry(in_buf, ses); 365 *ppmidQ = AllocMidQEntry(in_buf, ses);
386 if (midQ == NULL) { 366 if (*ppmidQ == NULL) {
367 return -ENOMEM;
368 }
369 return 0;
370}
371
372static int wait_for_response(struct cifsSesInfo *ses,
373 struct mid_q_entry *midQ,
374 unsigned long timeout,
375 unsigned long time_to_wait)
376{
377 unsigned long curr_timeout;
378
379 for (;;) {
380 curr_timeout = timeout + jiffies;
381 wait_event(ses->server->response_q,
382 (!(midQ->midState == MID_REQUEST_SUBMITTED)) ||
383 time_after(jiffies, curr_timeout) ||
384 ((ses->server->tcpStatus != CifsGood) &&
385 (ses->server->tcpStatus != CifsNew)));
386
387 if (time_after(jiffies, curr_timeout) &&
388 (midQ->midState == MID_REQUEST_SUBMITTED) &&
389 ((ses->server->tcpStatus == CifsGood) ||
390 (ses->server->tcpStatus == CifsNew))) {
391
392 unsigned long lrt;
393
394 /* We timed out. Is the server still
395 sending replies ? */
396 spin_lock(&GlobalMid_Lock);
397 lrt = ses->server->lstrp;
398 spin_unlock(&GlobalMid_Lock);
399
400 /* Calculate time_to_wait past last receive time.
401 Although we prefer not to time out if the
402 server is still responding - we will time
403 out if the server takes more than 15 (or 45
404 or 180) seconds to respond to this request
405 and has not responded to any request from
406 other threads on the client within 10 seconds */
407 lrt += time_to_wait;
408 if (time_after(jiffies, lrt)) {
409 /* No replies for time_to_wait. */
410 cERROR(1,("server not responding"));
411 return -1;
412 }
413 } else {
414 return 0;
415 }
416 }
417}
418
419int
420SendReceive2(const unsigned int xid, struct cifsSesInfo *ses,
421 struct kvec *iov, int n_vec, int * pRespBufType /* ret */,
422 const int long_op)
423{
424 int rc = 0;
425 unsigned int receive_len;
426 unsigned long timeout;
427 struct mid_q_entry *midQ;
428 struct smb_hdr *in_buf = iov[0].iov_base;
429
430 *pRespBufType = CIFS_NO_BUFFER; /* no response buf yet */
431
432 if ((ses == NULL) || (ses->server == NULL)) {
433 cifs_small_buf_release(in_buf);
434 cERROR(1,("Null session"));
435 return -EIO;
436 }
437
438 if(ses->server->tcpStatus == CifsExiting) {
439 cifs_small_buf_release(in_buf);
440 return -ENOENT;
441 }
442
443 /* Ensure that we do not send more than 50 overlapping requests
444 to the same server. We may make this configurable later or
445 use ses->maxReq */
446
447 rc = wait_for_free_request(ses, long_op);
448 if (rc) {
449 cifs_small_buf_release(in_buf);
450 return rc;
451 }
452
453 /* make sure that we sign in the same order that we send on this socket
454 and avoid races inside tcp sendmsg code that could cause corruption
455 of smb data */
456
457 down(&ses->server->tcpSem);
458
459 rc = allocate_mid(ses, in_buf, &midQ);
460 if (rc) {
387 up(&ses->server->tcpSem); 461 up(&ses->server->tcpSem);
388 cifs_small_buf_release(in_buf); 462 cifs_small_buf_release(in_buf);
389 /* If not lock req, update # of requests on wire to server */ 463 /* Update # of requests on wire to server */
390 if(long_op < 3) { 464 atomic_dec(&ses->server->inFlight);
391 atomic_dec(&ses->server->inFlight); 465 wake_up(&ses->server->request_q);
392 wake_up(&ses->server->request_q); 466 return rc;
393 }
394 return -ENOMEM;
395 } 467 }
396 468
397 rc = cifs_sign_smb2(iov, n_vec, ses->server, &midQ->sequence_number); 469 rc = cifs_sign_smb2(iov, n_vec, ses->server, &midQ->sequence_number);
@@ -406,32 +478,23 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses,
406 atomic_dec(&ses->server->inSend); 478 atomic_dec(&ses->server->inSend);
407 midQ->when_sent = jiffies; 479 midQ->when_sent = jiffies;
408#endif 480#endif
409 if(rc < 0) { 481
410 DeleteMidQEntry(midQ); 482 up(&ses->server->tcpSem);
411 up(&ses->server->tcpSem); 483 cifs_small_buf_release(in_buf);
412 cifs_small_buf_release(in_buf); 484
413 /* If not lock req, update # of requests on wire to server */ 485 if(rc < 0)
414 if(long_op < 3) { 486 goto out;
415 atomic_dec(&ses->server->inFlight);
416 wake_up(&ses->server->request_q);
417 }
418 return rc;
419 } else {
420 up(&ses->server->tcpSem);
421 cifs_small_buf_release(in_buf);
422 }
423 487
424 if (long_op == -1) 488 if (long_op == -1)
425 goto cifs_no_response_exit2; 489 goto out;
426 else if (long_op == 2) /* writes past end of file can take loong time */ 490 else if (long_op == 2) /* writes past end of file can take loong time */
427 timeout = 180 * HZ; 491 timeout = 180 * HZ;
428 else if (long_op == 1) 492 else if (long_op == 1)
429 timeout = 45 * HZ; /* should be greater than 493 timeout = 45 * HZ; /* should be greater than
430 servers oplock break timeout (about 43 seconds) */ 494 servers oplock break timeout (about 43 seconds) */
431 else if (long_op > 2) { 495 else
432 timeout = MAX_SCHEDULE_TIMEOUT;
433 } else
434 timeout = 15 * HZ; 496 timeout = 15 * HZ;
497
435 /* wait for 15 seconds or until woken up due to response arriving or 498 /* wait for 15 seconds or until woken up due to response arriving or
436 due to last connection to this server being unmounted */ 499 due to last connection to this server being unmounted */
437 if (signal_pending(current)) { 500 if (signal_pending(current)) {
@@ -441,19 +504,7 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses,
441 } 504 }
442 505
443 /* No user interrupts in wait - wreaks havoc with performance */ 506 /* No user interrupts in wait - wreaks havoc with performance */
444 if(timeout != MAX_SCHEDULE_TIMEOUT) { 507 wait_for_response(ses, midQ, timeout, 10 * HZ);
445 timeout += jiffies;
446 wait_event(ses->server->response_q,
447 (!(midQ->midState & MID_REQUEST_SUBMITTED)) ||
448 time_after(jiffies, timeout) ||
449 ((ses->server->tcpStatus != CifsGood) &&
450 (ses->server->tcpStatus != CifsNew)));
451 } else {
452 wait_event(ses->server->response_q,
453 (!(midQ->midState & MID_REQUEST_SUBMITTED)) ||
454 ((ses->server->tcpStatus != CifsGood) &&
455 (ses->server->tcpStatus != CifsNew)));
456 }
457 508
458 spin_lock(&GlobalMid_Lock); 509 spin_lock(&GlobalMid_Lock);
459 if (midQ->resp_buf) { 510 if (midQ->resp_buf) {
@@ -481,11 +532,9 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses,
481 } 532 }
482 spin_unlock(&GlobalMid_Lock); 533 spin_unlock(&GlobalMid_Lock);
483 DeleteMidQEntry(midQ); 534 DeleteMidQEntry(midQ);
484 /* If not lock req, update # of requests on wire to server */ 535 /* Update # of requests on wire to server */
485 if(long_op < 3) { 536 atomic_dec(&ses->server->inFlight);
486 atomic_dec(&ses->server->inFlight); 537 wake_up(&ses->server->request_q);
487 wake_up(&ses->server->request_q);
488 }
489 return rc; 538 return rc;
490 } 539 }
491 540
@@ -536,24 +585,12 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses,
536 cFYI(1,("Bad MID state?")); 585 cFYI(1,("Bad MID state?"));
537 } 586 }
538 } 587 }
539cifs_no_response_exit2:
540 DeleteMidQEntry(midQ);
541
542 if(long_op < 3) {
543 atomic_dec(&ses->server->inFlight);
544 wake_up(&ses->server->request_q);
545 }
546 588
547 return rc; 589out:
548 590
549out_unlock2: 591 DeleteMidQEntry(midQ);
550 up(&ses->server->tcpSem); 592 atomic_dec(&ses->server->inFlight);
551 cifs_small_buf_release(in_buf); 593 wake_up(&ses->server->request_q);
552 /* If not lock req, update # of requests on wire to server */
553 if(long_op < 3) {
554 atomic_dec(&ses->server->inFlight);
555 wake_up(&ses->server->request_q);
556 }
557 594
558 return rc; 595 return rc;
559} 596}
@@ -583,85 +620,34 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses,
583 /* Ensure that we do not send more than 50 overlapping requests 620 /* Ensure that we do not send more than 50 overlapping requests
584 to the same server. We may make this configurable later or 621 to the same server. We may make this configurable later or
585 use ses->maxReq */ 622 use ses->maxReq */
586 if(long_op == -1) {
587 /* oplock breaks must not be held up */
588 atomic_inc(&ses->server->inFlight);
589 } else {
590 spin_lock(&GlobalMid_Lock);
591 while(1) {
592 if(atomic_read(&ses->server->inFlight) >=
593 cifs_max_pending){
594 spin_unlock(&GlobalMid_Lock);
595#ifdef CONFIG_CIFS_STATS2
596 atomic_inc(&ses->server->num_waiters);
597#endif
598 wait_event(ses->server->request_q,
599 atomic_read(&ses->server->inFlight)
600 < cifs_max_pending);
601#ifdef CONFIG_CIFS_STATS2
602 atomic_dec(&ses->server->num_waiters);
603#endif
604 spin_lock(&GlobalMid_Lock);
605 } else {
606 if(ses->server->tcpStatus == CifsExiting) {
607 spin_unlock(&GlobalMid_Lock);
608 return -ENOENT;
609 }
610 623
611 /* can not count locking commands against total since 624 rc = wait_for_free_request(ses, long_op);
612 they are allowed to block on server */ 625 if (rc)
613 626 return rc;
614 if(long_op < 3) { 627
615 /* update # of requests on the wire to server */
616 atomic_inc(&ses->server->inFlight);
617 }
618 spin_unlock(&GlobalMid_Lock);
619 break;
620 }
621 }
622 }
623 /* make sure that we sign in the same order that we send on this socket 628 /* make sure that we sign in the same order that we send on this socket
624 and avoid races inside tcp sendmsg code that could cause corruption 629 and avoid races inside tcp sendmsg code that could cause corruption
625 of smb data */ 630 of smb data */
626 631
627 down(&ses->server->tcpSem); 632 down(&ses->server->tcpSem);
628 633
629 if (ses->server->tcpStatus == CifsExiting) { 634 rc = allocate_mid(ses, in_buf, &midQ);
630 rc = -ENOENT; 635 if (rc) {
631 goto out_unlock;
632 } else if (ses->server->tcpStatus == CifsNeedReconnect) {
633 cFYI(1,("tcp session dead - return to caller to retry"));
634 rc = -EAGAIN;
635 goto out_unlock;
636 } else if (ses->status != CifsGood) {
637 /* check if SMB session is bad because we are setting it up */
638 if((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
639 (in_buf->Command != SMB_COM_NEGOTIATE)) {
640 rc = -EAGAIN;
641 goto out_unlock;
642 } /* else ok - we are setting up session */
643 }
644 midQ = AllocMidQEntry(in_buf, ses);
645 if (midQ == NULL) {
646 up(&ses->server->tcpSem); 636 up(&ses->server->tcpSem);
647 /* If not lock req, update # of requests on wire to server */ 637 /* Update # of requests on wire to server */
648 if(long_op < 3) { 638 atomic_dec(&ses->server->inFlight);
649 atomic_dec(&ses->server->inFlight); 639 wake_up(&ses->server->request_q);
650 wake_up(&ses->server->request_q); 640 return rc;
651 }
652 return -ENOMEM;
653 } 641 }
654 642
655 if (in_buf->smb_buf_length > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) { 643 if (in_buf->smb_buf_length > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
656 up(&ses->server->tcpSem);
657 cERROR(1, ("Illegal length, greater than maximum frame, %d", 644 cERROR(1, ("Illegal length, greater than maximum frame, %d",
658 in_buf->smb_buf_length)); 645 in_buf->smb_buf_length));
659 DeleteMidQEntry(midQ); 646 DeleteMidQEntry(midQ);
660 /* If not lock req, update # of requests on wire to server */ 647 up(&ses->server->tcpSem);
661 if(long_op < 3) { 648 /* Update # of requests on wire to server */
662 atomic_dec(&ses->server->inFlight); 649 atomic_dec(&ses->server->inFlight);
663 wake_up(&ses->server->request_q); 650 wake_up(&ses->server->request_q);
664 }
665 return -EIO; 651 return -EIO;
666 } 652 }
667 653
@@ -677,27 +663,19 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses,
677 atomic_dec(&ses->server->inSend); 663 atomic_dec(&ses->server->inSend);
678 midQ->when_sent = jiffies; 664 midQ->when_sent = jiffies;
679#endif 665#endif
680 if(rc < 0) { 666 up(&ses->server->tcpSem);
681 DeleteMidQEntry(midQ); 667
682 up(&ses->server->tcpSem); 668 if(rc < 0)
683 /* If not lock req, update # of requests on wire to server */ 669 goto out;
684 if(long_op < 3) { 670
685 atomic_dec(&ses->server->inFlight);
686 wake_up(&ses->server->request_q);
687 }
688 return rc;
689 } else
690 up(&ses->server->tcpSem);
691 if (long_op == -1) 671 if (long_op == -1)
692 goto cifs_no_response_exit; 672 goto out;
693 else if (long_op == 2) /* writes past end of file can take loong time */ 673 else if (long_op == 2) /* writes past end of file can take loong time */
694 timeout = 180 * HZ; 674 timeout = 180 * HZ;
695 else if (long_op == 1) 675 else if (long_op == 1)
696 timeout = 45 * HZ; /* should be greater than 676 timeout = 45 * HZ; /* should be greater than
697 servers oplock break timeout (about 43 seconds) */ 677 servers oplock break timeout (about 43 seconds) */
698 else if (long_op > 2) { 678 else
699 timeout = MAX_SCHEDULE_TIMEOUT;
700 } else
701 timeout = 15 * HZ; 679 timeout = 15 * HZ;
702 /* wait for 15 seconds or until woken up due to response arriving or 680 /* wait for 15 seconds or until woken up due to response arriving or
703 due to last connection to this server being unmounted */ 681 due to last connection to this server being unmounted */
@@ -708,19 +686,7 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses,
708 } 686 }
709 687
710 /* No user interrupts in wait - wreaks havoc with performance */ 688 /* No user interrupts in wait - wreaks havoc with performance */
711 if(timeout != MAX_SCHEDULE_TIMEOUT) { 689 wait_for_response(ses, midQ, timeout, 10 * HZ);
712 timeout += jiffies;
713 wait_event(ses->server->response_q,
714 (!(midQ->midState & MID_REQUEST_SUBMITTED)) ||
715 time_after(jiffies, timeout) ||
716 ((ses->server->tcpStatus != CifsGood) &&
717 (ses->server->tcpStatus != CifsNew)));
718 } else {
719 wait_event(ses->server->response_q,
720 (!(midQ->midState & MID_REQUEST_SUBMITTED)) ||
721 ((ses->server->tcpStatus != CifsGood) &&
722 (ses->server->tcpStatus != CifsNew)));
723 }
724 690
725 spin_lock(&GlobalMid_Lock); 691 spin_lock(&GlobalMid_Lock);
726 if (midQ->resp_buf) { 692 if (midQ->resp_buf) {
@@ -748,11 +714,9 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses,
748 } 714 }
749 spin_unlock(&GlobalMid_Lock); 715 spin_unlock(&GlobalMid_Lock);
750 DeleteMidQEntry(midQ); 716 DeleteMidQEntry(midQ);
751 /* If not lock req, update # of requests on wire to server */ 717 /* Update # of requests on wire to server */
752 if(long_op < 3) { 718 atomic_dec(&ses->server->inFlight);
753 atomic_dec(&ses->server->inFlight); 719 wake_up(&ses->server->request_q);
754 wake_up(&ses->server->request_q);
755 }
756 return rc; 720 return rc;
757 } 721 }
758 722
@@ -799,23 +763,253 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses,
799 cERROR(1,("Bad MID state?")); 763 cERROR(1,("Bad MID state?"));
800 } 764 }
801 } 765 }
802cifs_no_response_exit: 766
767out:
768
803 DeleteMidQEntry(midQ); 769 DeleteMidQEntry(midQ);
770 atomic_dec(&ses->server->inFlight);
771 wake_up(&ses->server->request_q);
804 772
805 if(long_op < 3) { 773 return rc;
806 atomic_dec(&ses->server->inFlight); 774}
807 wake_up(&ses->server->request_q); 775
808 } 776/* Send an NT_CANCEL SMB to cause the POSIX blocking lock to return. */
777
778static int
779send_nt_cancel(struct cifsTconInfo *tcon, struct smb_hdr *in_buf,
780 struct mid_q_entry *midQ)
781{
782 int rc = 0;
783 struct cifsSesInfo *ses = tcon->ses;
784 __u16 mid = in_buf->Mid;
809 785
786 header_assemble(in_buf, SMB_COM_NT_CANCEL, tcon, 0);
787 in_buf->Mid = mid;
788 down(&ses->server->tcpSem);
789 rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
790 if (rc) {
791 up(&ses->server->tcpSem);
792 return rc;
793 }
794 rc = smb_send(ses->server->ssocket, in_buf, in_buf->smb_buf_length,
795 (struct sockaddr *) &(ses->server->addr.sockAddr));
796 up(&ses->server->tcpSem);
810 return rc; 797 return rc;
798}
799
800/* We send a LOCKINGX_CANCEL_LOCK to cause the Windows
801 blocking lock to return. */
802
803static int
804send_lock_cancel(const unsigned int xid, struct cifsTconInfo *tcon,
805 struct smb_hdr *in_buf,
806 struct smb_hdr *out_buf)
807{
808 int bytes_returned;
809 struct cifsSesInfo *ses = tcon->ses;
810 LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;
811
812 /* We just modify the current in_buf to change
813 the type of lock from LOCKING_ANDX_SHARED_LOCK
814 or LOCKING_ANDX_EXCLUSIVE_LOCK to
815 LOCKING_ANDX_CANCEL_LOCK. */
816
817 pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
818 pSMB->Timeout = 0;
819 pSMB->hdr.Mid = GetNextMid(ses->server);
820
821 return SendReceive(xid, ses, in_buf, out_buf,
822 &bytes_returned, 0);
823}
811 824
812out_unlock: 825int
826SendReceiveBlockingLock(const unsigned int xid, struct cifsTconInfo *tcon,
827 struct smb_hdr *in_buf, struct smb_hdr *out_buf,
828 int *pbytes_returned)
829{
830 int rc = 0;
831 int rstart = 0;
832 unsigned int receive_len;
833 struct mid_q_entry *midQ;
834 struct cifsSesInfo *ses;
835
836 if (tcon == NULL || tcon->ses == NULL) {
837 cERROR(1,("Null smb session"));
838 return -EIO;
839 }
840 ses = tcon->ses;
841
842 if(ses->server == NULL) {
843 cERROR(1,("Null tcp session"));
844 return -EIO;
845 }
846
847 if(ses->server->tcpStatus == CifsExiting)
848 return -ENOENT;
849
850 /* Ensure that we do not send more than 50 overlapping requests
851 to the same server. We may make this configurable later or
852 use ses->maxReq */
853
854 rc = wait_for_free_request(ses, 3);
855 if (rc)
856 return rc;
857
858 /* make sure that we sign in the same order that we send on this socket
859 and avoid races inside tcp sendmsg code that could cause corruption
860 of smb data */
861
862 down(&ses->server->tcpSem);
863
864 rc = allocate_mid(ses, in_buf, &midQ);
865 if (rc) {
866 up(&ses->server->tcpSem);
867 return rc;
868 }
869
870 if (in_buf->smb_buf_length > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
871 up(&ses->server->tcpSem);
872 cERROR(1, ("Illegal length, greater than maximum frame, %d",
873 in_buf->smb_buf_length));
874 DeleteMidQEntry(midQ);
875 return -EIO;
876 }
877
878 rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
879
880 midQ->midState = MID_REQUEST_SUBMITTED;
881#ifdef CONFIG_CIFS_STATS2
882 atomic_inc(&ses->server->inSend);
883#endif
884 rc = smb_send(ses->server->ssocket, in_buf, in_buf->smb_buf_length,
885 (struct sockaddr *) &(ses->server->addr.sockAddr));
886#ifdef CONFIG_CIFS_STATS2
887 atomic_dec(&ses->server->inSend);
888 midQ->when_sent = jiffies;
889#endif
813 up(&ses->server->tcpSem); 890 up(&ses->server->tcpSem);
814 /* If not lock req, update # of requests on wire to server */ 891
815 if(long_op < 3) { 892 if(rc < 0) {
816 atomic_dec(&ses->server->inFlight); 893 DeleteMidQEntry(midQ);
817 wake_up(&ses->server->request_q); 894 return rc;
895 }
896
897 /* Wait for a reply - allow signals to interrupt. */
898 rc = wait_event_interruptible(ses->server->response_q,
899 (!(midQ->midState == MID_REQUEST_SUBMITTED)) ||
900 ((ses->server->tcpStatus != CifsGood) &&
901 (ses->server->tcpStatus != CifsNew)));
902
903 /* Were we interrupted by a signal ? */
904 if ((rc == -ERESTARTSYS) &&
905 (midQ->midState == MID_REQUEST_SUBMITTED) &&
906 ((ses->server->tcpStatus == CifsGood) ||
907 (ses->server->tcpStatus == CifsNew))) {
908
909 if (in_buf->Command == SMB_COM_TRANSACTION2) {
910 /* POSIX lock. We send a NT_CANCEL SMB to cause the
911 blocking lock to return. */
912
913 rc = send_nt_cancel(tcon, in_buf, midQ);
914 if (rc) {
915 DeleteMidQEntry(midQ);
916 return rc;
917 }
918 } else {
919 /* Windows lock. We send a LOCKINGX_CANCEL_LOCK
920 to cause the blocking lock to return. */
921
922 rc = send_lock_cancel(xid, tcon, in_buf, out_buf);
923
924 /* If we get -ENOLCK back the lock may have
925 already been removed. Don't exit in this case. */
926 if (rc && rc != -ENOLCK) {
927 DeleteMidQEntry(midQ);
928 return rc;
929 }
930 }
931
932 /* Wait 5 seconds for the response. */
933 if (wait_for_response(ses, midQ, 5 * HZ, 5 * HZ)==0) {
934 /* We got the response - restart system call. */
935 rstart = 1;
936 }
937 }
938
939 spin_lock(&GlobalMid_Lock);
940 if (midQ->resp_buf) {
941 spin_unlock(&GlobalMid_Lock);
942 receive_len = midQ->resp_buf->smb_buf_length;
943 } else {
944 cERROR(1,("No response for cmd %d mid %d",
945 midQ->command, midQ->mid));
946 if(midQ->midState == MID_REQUEST_SUBMITTED) {
947 if(ses->server->tcpStatus == CifsExiting)
948 rc = -EHOSTDOWN;
949 else {
950 ses->server->tcpStatus = CifsNeedReconnect;
951 midQ->midState = MID_RETRY_NEEDED;
952 }
953 }
954
955 if (rc != -EHOSTDOWN) {
956 if(midQ->midState == MID_RETRY_NEEDED) {
957 rc = -EAGAIN;
958 cFYI(1,("marking request for retry"));
959 } else {
960 rc = -EIO;
961 }
962 }
963 spin_unlock(&GlobalMid_Lock);
964 DeleteMidQEntry(midQ);
965 return rc;
818 } 966 }
967
968 if (receive_len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE) {
969 cERROR(1, ("Frame too large received. Length: %d Xid: %d",
970 receive_len, xid));
971 rc = -EIO;
972 } else { /* rcvd frame is ok */
973
974 if (midQ->resp_buf && out_buf
975 && (midQ->midState == MID_RESPONSE_RECEIVED)) {
976 out_buf->smb_buf_length = receive_len;
977 memcpy((char *)out_buf + 4,
978 (char *)midQ->resp_buf + 4,
979 receive_len);
980
981 dump_smb(out_buf, 92);
982 /* convert the length into a more usable form */
983 if((receive_len > 24) &&
984 (ses->server->secMode & (SECMODE_SIGN_REQUIRED |
985 SECMODE_SIGN_ENABLED))) {
986 rc = cifs_verify_signature(out_buf,
987 ses->server->mac_signing_key,
988 midQ->sequence_number+1);
989 if(rc) {
990 cERROR(1,("Unexpected SMB signature"));
991 /* BB FIXME add code to kill session */
992 }
993 }
994
995 *pbytes_returned = out_buf->smb_buf_length;
996
997 /* BB special case reconnect tid and uid here? */
998 rc = map_smb_to_linux_error(out_buf);
819 999
1000 /* convert ByteCount if necessary */
1001 if (receive_len >=
1002 sizeof (struct smb_hdr) -
1003 4 /* do not count RFC1001 header */ +
1004 (2 * out_buf->WordCount) + 2 /* bcc */ )
1005 BCC(out_buf) = le16_to_cpu(BCC_LE(out_buf));
1006 } else {
1007 rc = -EIO;
1008 cERROR(1,("Bad MID state?"));
1009 }
1010 }
1011 DeleteMidQEntry(midQ);
1012 if (rstart && rc == -EACCES)
1013 return -ERESTARTSYS;
820 return rc; 1014 return rc;
821} 1015}
diff --git a/fs/cifs/xattr.c b/fs/cifs/xattr.c
index 7754d641775..067648b7179 100644
--- a/fs/cifs/xattr.c
+++ b/fs/cifs/xattr.c
@@ -330,11 +330,15 @@ ssize_t cifs_listxattr(struct dentry * direntry, char * data, size_t buf_size)
330 sb = direntry->d_inode->i_sb; 330 sb = direntry->d_inode->i_sb;
331 if(sb == NULL) 331 if(sb == NULL)
332 return -EIO; 332 return -EIO;
333 xid = GetXid();
334 333
335 cifs_sb = CIFS_SB(sb); 334 cifs_sb = CIFS_SB(sb);
336 pTcon = cifs_sb->tcon; 335 pTcon = cifs_sb->tcon;
337 336
337 if(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
338 return -EOPNOTSUPP;
339
340 xid = GetXid();
341
338 full_path = build_path_from_dentry(direntry); 342 full_path = build_path_from_dentry(direntry);
339 if(full_path == NULL) { 343 if(full_path == NULL) {
340 FreeXid(xid); 344 FreeXid(xid);
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 19ffb043abb..3a3567433b9 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -1168,7 +1168,7 @@ static int ep_unlink(struct eventpoll *ep, struct epitem *epi)
1168eexit_1: 1168eexit_1:
1169 1169
1170 DNPRINTK(3, (KERN_INFO "[%p] eventpoll: ep_unlink(%p, %p) = %d\n", 1170 DNPRINTK(3, (KERN_INFO "[%p] eventpoll: ep_unlink(%p, %p) = %d\n",
1171 current, ep, epi->file, error)); 1171 current, ep, epi->ffd.file, error));
1172 1172
1173 return error; 1173 return error;
1174} 1174}
@@ -1236,7 +1236,7 @@ static int ep_poll_callback(wait_queue_t *wait, unsigned mode, int sync, void *k
1236 struct eventpoll *ep = epi->ep; 1236 struct eventpoll *ep = epi->ep;
1237 1237
1238 DNPRINTK(3, (KERN_INFO "[%p] eventpoll: poll_callback(%p) epi=%p ep=%p\n", 1238 DNPRINTK(3, (KERN_INFO "[%p] eventpoll: poll_callback(%p) epi=%p ep=%p\n",
1239 current, epi->file, epi, ep)); 1239 current, epi->ffd.file, epi, ep));
1240 1240
1241 write_lock_irqsave(&ep->lock, flags); 1241 write_lock_irqsave(&ep->lock, flags);
1242 1242
diff --git a/fs/exec.c b/fs/exec.c
index 8344ba73a2a..54135df2a96 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -486,8 +486,6 @@ struct file *open_exec(const char *name)
486 if (!(nd.mnt->mnt_flags & MNT_NOEXEC) && 486 if (!(nd.mnt->mnt_flags & MNT_NOEXEC) &&
487 S_ISREG(inode->i_mode)) { 487 S_ISREG(inode->i_mode)) {
488 int err = vfs_permission(&nd, MAY_EXEC); 488 int err = vfs_permission(&nd, MAY_EXEC);
489 if (!err && !(inode->i_mode & 0111))
490 err = -EACCES;
491 file = ERR_PTR(err); 489 file = ERR_PTR(err);
492 if (!err) { 490 if (!err) {
493 file = nameidata_to_filp(&nd, O_RDONLY); 491 file = nameidata_to_filp(&nd, O_RDONLY);
@@ -753,7 +751,7 @@ no_thread_group:
753 751
754 write_lock_irq(&tasklist_lock); 752 write_lock_irq(&tasklist_lock);
755 spin_lock(&oldsighand->siglock); 753 spin_lock(&oldsighand->siglock);
756 spin_lock(&newsighand->siglock); 754 spin_lock_nested(&newsighand->siglock, SINGLE_DEPTH_NESTING);
757 755
758 rcu_assign_pointer(current->sighand, newsighand); 756 rcu_assign_pointer(current->sighand, newsighand);
759 recalc_sigpending(); 757 recalc_sigpending();
@@ -922,12 +920,6 @@ int prepare_binprm(struct linux_binprm *bprm)
922 int retval; 920 int retval;
923 921
924 mode = inode->i_mode; 922 mode = inode->i_mode;
925 /*
926 * Check execute perms again - if the caller has CAP_DAC_OVERRIDE,
927 * generic_permission lets a non-executable through
928 */
929 if (!(mode & 0111)) /* with at least _one_ execute bit set */
930 return -EACCES;
931 if (bprm->file->f_op == NULL) 923 if (bprm->file->f_op == NULL)
932 return -EACCES; 924 return -EACCES;
933 925
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index f2702cda977..681dea8f953 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
@@ -775,7 +775,7 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
775 if (EXT2_INODE_SIZE(sb) == 0) 775 if (EXT2_INODE_SIZE(sb) == 0)
776 goto cantfind_ext2; 776 goto cantfind_ext2;
777 sbi->s_inodes_per_block = sb->s_blocksize / EXT2_INODE_SIZE(sb); 777 sbi->s_inodes_per_block = sb->s_blocksize / EXT2_INODE_SIZE(sb);
778 if (sbi->s_inodes_per_block == 0) 778 if (sbi->s_inodes_per_block == 0 || sbi->s_inodes_per_group == 0)
779 goto cantfind_ext2; 779 goto cantfind_ext2;
780 sbi->s_itb_per_group = sbi->s_inodes_per_group / 780 sbi->s_itb_per_group = sbi->s_inodes_per_group /
781 sbi->s_inodes_per_block; 781 sbi->s_inodes_per_block;
diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
index a504a40d6d2..063d994bda0 100644
--- a/fs/ext3/balloc.c
+++ b/fs/ext3/balloc.c
@@ -1269,12 +1269,12 @@ ext3_fsblk_t ext3_new_blocks(handle_t *handle, struct inode *inode,
1269 goal = le32_to_cpu(es->s_first_data_block); 1269 goal = le32_to_cpu(es->s_first_data_block);
1270 group_no = (goal - le32_to_cpu(es->s_first_data_block)) / 1270 group_no = (goal - le32_to_cpu(es->s_first_data_block)) /
1271 EXT3_BLOCKS_PER_GROUP(sb); 1271 EXT3_BLOCKS_PER_GROUP(sb);
1272 goal_group = group_no;
1273retry_alloc:
1272 gdp = ext3_get_group_desc(sb, group_no, &gdp_bh); 1274 gdp = ext3_get_group_desc(sb, group_no, &gdp_bh);
1273 if (!gdp) 1275 if (!gdp)
1274 goto io_error; 1276 goto io_error;
1275 1277
1276 goal_group = group_no;
1277retry:
1278 free_blocks = le16_to_cpu(gdp->bg_free_blocks_count); 1278 free_blocks = le16_to_cpu(gdp->bg_free_blocks_count);
1279 /* 1279 /*
1280 * if there are not enough free blocks to make a new reservation 1280 * if there are not enough free blocks to make a new reservation
@@ -1349,7 +1349,7 @@ retry:
1349 if (my_rsv) { 1349 if (my_rsv) {
1350 my_rsv = NULL; 1350 my_rsv = NULL;
1351 group_no = goal_group; 1351 group_no = goal_group;
1352 goto retry; 1352 goto retry_alloc;
1353 } 1353 }
1354 /* No space left on the device */ 1354 /* No space left on the device */
1355 *errp = -ENOSPC; 1355 *errp = -ENOSPC;
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 63614ed1633..5c4fcd1dbf5 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -395,14 +395,16 @@ static int fuse_readpages(struct file *file, struct address_space *mapping,
395 struct fuse_readpages_data data; 395 struct fuse_readpages_data data;
396 int err; 396 int err;
397 397
398 err = -EIO;
398 if (is_bad_inode(inode)) 399 if (is_bad_inode(inode))
399 return -EIO; 400 goto clean_pages_up;
400 401
401 data.file = file; 402 data.file = file;
402 data.inode = inode; 403 data.inode = inode;
403 data.req = fuse_get_req(fc); 404 data.req = fuse_get_req(fc);
405 err = PTR_ERR(data.req);
404 if (IS_ERR(data.req)) 406 if (IS_ERR(data.req))
405 return PTR_ERR(data.req); 407 goto clean_pages_up;
406 408
407 err = read_cache_pages(mapping, pages, fuse_readpages_fill, &data); 409 err = read_cache_pages(mapping, pages, fuse_readpages_fill, &data);
408 if (!err) { 410 if (!err) {
@@ -412,6 +414,10 @@ static int fuse_readpages(struct file *file, struct address_space *mapping,
412 fuse_put_request(fc, data.req); 414 fuse_put_request(fc, data.req);
413 } 415 }
414 return err; 416 return err;
417
418clean_pages_up:
419 put_pages_list(pages);
420 return err;
415} 421}
416 422
417static size_t fuse_send_write(struct fuse_req *req, struct file *file, 423static size_t fuse_send_write(struct fuse_req *req, struct file *file,
diff --git a/fs/ioprio.c b/fs/ioprio.c
index 93aa5715f22..78b1deae3fa 100644
--- a/fs/ioprio.c
+++ b/fs/ioprio.c
@@ -44,6 +44,9 @@ static int set_task_ioprio(struct task_struct *task, int ioprio)
44 task->ioprio = ioprio; 44 task->ioprio = ioprio;
45 45
46 ioc = task->io_context; 46 ioc = task->io_context;
47 /* see wmb() in current_io_context() */
48 smp_read_barrier_depends();
49
47 if (ioc && ioc->set_ioprio) 50 if (ioc && ioc->set_ioprio)
48 ioc->set_ioprio(ioc, ioprio); 51 ioc->set_ioprio(ioc, ioprio);
49 52
@@ -111,9 +114,9 @@ asmlinkage long sys_ioprio_set(int which, int who, int ioprio)
111 continue; 114 continue;
112 ret = set_task_ioprio(p, ioprio); 115 ret = set_task_ioprio(p, ioprio);
113 if (ret) 116 if (ret)
114 break; 117 goto free_uid;
115 } while_each_thread(g, p); 118 } while_each_thread(g, p);
116 119free_uid:
117 if (who) 120 if (who)
118 free_uid(user); 121 free_uid(user);
119 break; 122 break;
@@ -137,6 +140,29 @@ out:
137 return ret; 140 return ret;
138} 141}
139 142
143int ioprio_best(unsigned short aprio, unsigned short bprio)
144{
145 unsigned short aclass = IOPRIO_PRIO_CLASS(aprio);
146 unsigned short bclass = IOPRIO_PRIO_CLASS(bprio);
147
148 if (!ioprio_valid(aprio))
149 return bprio;
150 if (!ioprio_valid(bprio))
151 return aprio;
152
153 if (aclass == IOPRIO_CLASS_NONE)
154 aclass = IOPRIO_CLASS_BE;
155 if (bclass == IOPRIO_CLASS_NONE)
156 bclass = IOPRIO_CLASS_BE;
157
158 if (aclass == bclass)
159 return min(aprio, bprio);
160 if (aclass > bclass)
161 return bprio;
162 else
163 return aprio;
164}
165
140asmlinkage long sys_ioprio_get(int which, int who) 166asmlinkage long sys_ioprio_get(int which, int who)
141{ 167{
142 struct task_struct *g, *p; 168 struct task_struct *g, *p;
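The ioprio_best() helper added above picks the effectively stronger of two I/O priorities: an unset value defers to the other one, the numerically lower class wins across classes (RT beats BE beats IDLE), and within one class the numerically lower level wins via min(). A minimal standalone sketch of that selection logic follows; the shift and class constants are defined locally for the example rather than taken from the kernel headers.

    #include <stdio.h>

    /* Local stand-ins for the kernel's ioprio packing: class in the
     * high bits, per-class level in the low bits. */
    #define CLASS_SHIFT 13
    #define CLASS_NONE  0
    #define CLASS_RT    1
    #define CLASS_BE    2
    #define CLASS_IDLE  3

    #define PRIO(class, data) (((class) << CLASS_SHIFT) | (data))
    #define PRIO_CLASS(p)     ((p) >> CLASS_SHIFT)
    #define PRIO_VALID(p)     (PRIO_CLASS(p) != CLASS_NONE)

    static unsigned short prio_best(unsigned short a, unsigned short b)
    {
        unsigned short aclass = PRIO_CLASS(a);
        unsigned short bclass = PRIO_CLASS(b);

        if (!PRIO_VALID(a))      /* unset: the other side wins */
            return b;
        if (!PRIO_VALID(b))
            return a;
        if (aclass == bclass)    /* same class: lower level is stronger */
            return a < b ? a : b;
        return aclass > bclass ? b : a;  /* RT(1) beats BE(2) beats IDLE(3) */
    }

    int main(void)
    {
        unsigned short rt4 = PRIO(CLASS_RT, 4);
        unsigned short be0 = PRIO(CLASS_BE, 0);
        unsigned short be6 = PRIO(CLASS_BE, 6);

        printf("rt4 vs be0 -> class %d\n", (int)PRIO_CLASS(prio_best(rt4, be0)));  /* 1 */
        printf("be0 vs be6 -> level %d\n",
               (int)(prio_best(be0, be6) & ((1 << CLASS_SHIFT) - 1)));             /* 0 */
        return 0;
    }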
diff --git a/fs/jbd/commit.c b/fs/jbd/commit.c
index 0971814c38b..42da6078431 100644
--- a/fs/jbd/commit.c
+++ b/fs/jbd/commit.c
@@ -261,7 +261,7 @@ void journal_commit_transaction(journal_t *journal)
261 struct buffer_head *bh = jh2bh(jh); 261 struct buffer_head *bh = jh2bh(jh);
262 262
263 jbd_lock_bh_state(bh); 263 jbd_lock_bh_state(bh);
264 kfree(jh->b_committed_data); 264 jbd_slab_free(jh->b_committed_data, bh->b_size);
265 jh->b_committed_data = NULL; 265 jh->b_committed_data = NULL;
266 jbd_unlock_bh_state(bh); 266 jbd_unlock_bh_state(bh);
267 } 267 }
@@ -745,14 +745,14 @@ restart_loop:
745 * Otherwise, we can just throw away the frozen data now. 745 * Otherwise, we can just throw away the frozen data now.
746 */ 746 */
747 if (jh->b_committed_data) { 747 if (jh->b_committed_data) {
748 kfree(jh->b_committed_data); 748 jbd_slab_free(jh->b_committed_data, bh->b_size);
749 jh->b_committed_data = NULL; 749 jh->b_committed_data = NULL;
750 if (jh->b_frozen_data) { 750 if (jh->b_frozen_data) {
751 jh->b_committed_data = jh->b_frozen_data; 751 jh->b_committed_data = jh->b_frozen_data;
752 jh->b_frozen_data = NULL; 752 jh->b_frozen_data = NULL;
753 } 753 }
754 } else if (jh->b_frozen_data) { 754 } else if (jh->b_frozen_data) {
755 kfree(jh->b_frozen_data); 755 jbd_slab_free(jh->b_frozen_data, bh->b_size);
756 jh->b_frozen_data = NULL; 756 jh->b_frozen_data = NULL;
757 } 757 }
758 758
diff --git a/fs/jbd/journal.c b/fs/jbd/journal.c
index 8c9b28dff11..f66724ce443 100644
--- a/fs/jbd/journal.c
+++ b/fs/jbd/journal.c
@@ -84,6 +84,7 @@ EXPORT_SYMBOL(journal_force_commit);
84 84
85static int journal_convert_superblock_v1(journal_t *, journal_superblock_t *); 85static int journal_convert_superblock_v1(journal_t *, journal_superblock_t *);
86static void __journal_abort_soft (journal_t *journal, int errno); 86static void __journal_abort_soft (journal_t *journal, int errno);
87static int journal_create_jbd_slab(size_t slab_size);
87 88
88/* 89/*
89 * Helper function used to manage commit timeouts 90 * Helper function used to manage commit timeouts
@@ -328,10 +329,10 @@ repeat:
328 char *tmp; 329 char *tmp;
329 330
330 jbd_unlock_bh_state(bh_in); 331 jbd_unlock_bh_state(bh_in);
331 tmp = jbd_rep_kmalloc(bh_in->b_size, GFP_NOFS); 332 tmp = jbd_slab_alloc(bh_in->b_size, GFP_NOFS);
332 jbd_lock_bh_state(bh_in); 333 jbd_lock_bh_state(bh_in);
333 if (jh_in->b_frozen_data) { 334 if (jh_in->b_frozen_data) {
334 kfree(tmp); 335 jbd_slab_free(tmp, bh_in->b_size);
335 goto repeat; 336 goto repeat;
336 } 337 }
337 338
@@ -1069,17 +1070,17 @@ static int load_superblock(journal_t *journal)
1069int journal_load(journal_t *journal) 1070int journal_load(journal_t *journal)
1070{ 1071{
1071 int err; 1072 int err;
1073 journal_superblock_t *sb;
1072 1074
1073 err = load_superblock(journal); 1075 err = load_superblock(journal);
1074 if (err) 1076 if (err)
1075 return err; 1077 return err;
1076 1078
1079 sb = journal->j_superblock;
1077 /* If this is a V2 superblock, then we have to check the 1080 /* If this is a V2 superblock, then we have to check the
1078 * features flags on it. */ 1081 * features flags on it. */
1079 1082
1080 if (journal->j_format_version >= 2) { 1083 if (journal->j_format_version >= 2) {
1081 journal_superblock_t *sb = journal->j_superblock;
1082
1083 if ((sb->s_feature_ro_compat & 1084 if ((sb->s_feature_ro_compat &
1084 ~cpu_to_be32(JFS_KNOWN_ROCOMPAT_FEATURES)) || 1085 ~cpu_to_be32(JFS_KNOWN_ROCOMPAT_FEATURES)) ||
1085 (sb->s_feature_incompat & 1086 (sb->s_feature_incompat &
@@ -1090,6 +1091,13 @@ int journal_load(journal_t *journal)
1090 } 1091 }
1091 } 1092 }
1092 1093
1094 /*
1095 * Create a slab for this blocksize
1096 */
1097 err = journal_create_jbd_slab(cpu_to_be32(sb->s_blocksize));
1098 if (err)
1099 return err;
1100
1093 /* Let the recovery code check whether it needs to recover any 1101 /* Let the recovery code check whether it needs to recover any
1094 * data from the journal. */ 1102 * data from the journal. */
1095 if (journal_recover(journal)) 1103 if (journal_recover(journal))
@@ -1612,6 +1620,77 @@ void * __jbd_kmalloc (const char *where, size_t size, gfp_t flags, int retry)
1612} 1620}
1613 1621
1614/* 1622/*
1623 * jbd slab management: create 1k, 2k, 4k, 8k slabs as needed
1624 * and allocate frozen and commit buffers from these slabs.
1625 *
1626 * Reason for doing this is to avoid SLAB_DEBUG - since it could
1627 * cause bh to cross page boundary.
1628 */
1629
1630#define JBD_MAX_SLABS 5
1631#define JBD_SLAB_INDEX(size) (size >> 11)
1632
1633static kmem_cache_t *jbd_slab[JBD_MAX_SLABS];
1634static const char *jbd_slab_names[JBD_MAX_SLABS] = {
1635 "jbd_1k", "jbd_2k", "jbd_4k", NULL, "jbd_8k"
1636};
1637
1638static void journal_destroy_jbd_slabs(void)
1639{
1640 int i;
1641
1642 for (i = 0; i < JBD_MAX_SLABS; i++) {
1643 if (jbd_slab[i])
1644 kmem_cache_destroy(jbd_slab[i]);
1645 jbd_slab[i] = NULL;
1646 }
1647}
1648
1649static int journal_create_jbd_slab(size_t slab_size)
1650{
1651 int i = JBD_SLAB_INDEX(slab_size);
1652
1653 BUG_ON(i >= JBD_MAX_SLABS);
1654
1655 /*
1656 * Check if we already have a slab created for this size
1657 */
1658 if (jbd_slab[i])
1659 return 0;
1660
1661 /*
1662 * Create a slab and force alignment to be same as slabsize -
1663 * this will make sure that allocations won't cross the page
1664 * boundary.
1665 */
1666 jbd_slab[i] = kmem_cache_create(jbd_slab_names[i],
1667 slab_size, slab_size, 0, NULL, NULL);
1668 if (!jbd_slab[i]) {
1669 printk(KERN_EMERG "JBD: no memory for jbd_slab cache\n");
1670 return -ENOMEM;
1671 }
1672 return 0;
1673}
1674
1675void * jbd_slab_alloc(size_t size, gfp_t flags)
1676{
1677 int idx;
1678
1679 idx = JBD_SLAB_INDEX(size);
1680 BUG_ON(jbd_slab[idx] == NULL);
1681 return kmem_cache_alloc(jbd_slab[idx], flags | __GFP_NOFAIL);
1682}
1683
1684void jbd_slab_free(void *ptr, size_t size)
1685{
1686 int idx;
1687
1688 idx = JBD_SLAB_INDEX(size);
1689 BUG_ON(jbd_slab[idx] == NULL);
1690 kmem_cache_free(jbd_slab[idx], ptr);
1691}
1692
1693/*
1615 * Journal_head storage management 1694 * Journal_head storage management
1616 */ 1695 */
1617static kmem_cache_t *journal_head_cache; 1696static kmem_cache_t *journal_head_cache;
@@ -1799,13 +1878,13 @@ static void __journal_remove_journal_head(struct buffer_head *bh)
1799 printk(KERN_WARNING "%s: freeing " 1878 printk(KERN_WARNING "%s: freeing "
1800 "b_frozen_data\n", 1879 "b_frozen_data\n",
1801 __FUNCTION__); 1880 __FUNCTION__);
1802 kfree(jh->b_frozen_data); 1881 jbd_slab_free(jh->b_frozen_data, bh->b_size);
1803 } 1882 }
1804 if (jh->b_committed_data) { 1883 if (jh->b_committed_data) {
1805 printk(KERN_WARNING "%s: freeing " 1884 printk(KERN_WARNING "%s: freeing "
1806 "b_committed_data\n", 1885 "b_committed_data\n",
1807 __FUNCTION__); 1886 __FUNCTION__);
1808 kfree(jh->b_committed_data); 1887 jbd_slab_free(jh->b_committed_data, bh->b_size);
1809 } 1888 }
1810 bh->b_private = NULL; 1889 bh->b_private = NULL;
1811 jh->b_bh = NULL; /* debug, really */ 1890 jh->b_bh = NULL; /* debug, really */
@@ -1961,6 +2040,7 @@ static void journal_destroy_caches(void)
1961 journal_destroy_revoke_caches(); 2040 journal_destroy_revoke_caches();
1962 journal_destroy_journal_head_cache(); 2041 journal_destroy_journal_head_cache();
1963 journal_destroy_handle_cache(); 2042 journal_destroy_handle_cache();
2043 journal_destroy_jbd_slabs();
1964} 2044}
1965 2045
1966static int __init journal_init(void) 2046static int __init journal_init(void)
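The slab scheme introduced above keys its per-blocksize caches with JBD_SLAB_INDEX(size), i.e. size >> 11, so 1k, 2k, 4k and 8k buffers land in slots 0, 1, 2 and 4 of the five-entry array (slot 3 stays unused, hence the NULL name). A tiny standalone check of that mapping, with the macro arithmetic and name table reproduced locally:

    #include <stdio.h>
    #include <stddef.h>

    /* Same arithmetic as the JBD_SLAB_INDEX() macro in the hunk above. */
    #define SLAB_INDEX(size) ((size) >> 11)

    int main(void)
    {
        static const char *names[5] = { "jbd_1k", "jbd_2k", "jbd_4k", NULL, "jbd_8k" };
        size_t sizes[] = { 1024, 2048, 4096, 8192 };
        size_t i;

        for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
            size_t idx = SLAB_INDEX(sizes[i]);
            printf("%zu bytes -> slot %zu (%s)\n", sizes[i], idx, names[idx]);
        }
        return 0;
    }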
diff --git a/fs/jbd/transaction.c b/fs/jbd/transaction.c
index 508b2ea91f4..de2e4cbbf79 100644
--- a/fs/jbd/transaction.c
+++ b/fs/jbd/transaction.c
@@ -666,8 +666,9 @@ repeat:
666 if (!frozen_buffer) { 666 if (!frozen_buffer) {
667 JBUFFER_TRACE(jh, "allocate memory for buffer"); 667 JBUFFER_TRACE(jh, "allocate memory for buffer");
668 jbd_unlock_bh_state(bh); 668 jbd_unlock_bh_state(bh);
669 frozen_buffer = jbd_kmalloc(jh2bh(jh)->b_size, 669 frozen_buffer =
670 GFP_NOFS); 670 jbd_slab_alloc(jh2bh(jh)->b_size,
671 GFP_NOFS);
671 if (!frozen_buffer) { 672 if (!frozen_buffer) {
672 printk(KERN_EMERG 673 printk(KERN_EMERG
673 "%s: OOM for frozen_buffer\n", 674 "%s: OOM for frozen_buffer\n",
@@ -879,7 +880,7 @@ int journal_get_undo_access(handle_t *handle, struct buffer_head *bh)
879 880
880repeat: 881repeat:
881 if (!jh->b_committed_data) { 882 if (!jh->b_committed_data) {
882 committed_data = jbd_kmalloc(jh2bh(jh)->b_size, GFP_NOFS); 883 committed_data = jbd_slab_alloc(jh2bh(jh)->b_size, GFP_NOFS);
883 if (!committed_data) { 884 if (!committed_data) {
884 printk(KERN_EMERG "%s: No memory for committed data\n", 885 printk(KERN_EMERG "%s: No memory for committed data\n",
885 __FUNCTION__); 886 __FUNCTION__);
@@ -906,7 +907,7 @@ repeat:
906out: 907out:
907 journal_put_journal_head(jh); 908 journal_put_journal_head(jh);
908 if (unlikely(committed_data)) 909 if (unlikely(committed_data))
909 kfree(committed_data); 910 jbd_slab_free(committed_data, bh->b_size);
910 return err; 911 return err;
911} 912}
912 913
diff --git a/fs/jfs/inode.c b/fs/jfs/inode.c
index 43e3f566aad..a223cf4faa9 100644
--- a/fs/jfs/inode.c
+++ b/fs/jfs/inode.c
@@ -168,16 +168,15 @@ void jfs_dirty_inode(struct inode *inode)
168 set_cflag(COMMIT_Dirty, inode); 168 set_cflag(COMMIT_Dirty, inode);
169} 169}
170 170
171static int 171int jfs_get_block(struct inode *ip, sector_t lblock,
172jfs_get_blocks(struct inode *ip, sector_t lblock, unsigned long max_blocks, 172 struct buffer_head *bh_result, int create)
173 struct buffer_head *bh_result, int create)
174{ 173{
175 s64 lblock64 = lblock; 174 s64 lblock64 = lblock;
176 int rc = 0; 175 int rc = 0;
177 xad_t xad; 176 xad_t xad;
178 s64 xaddr; 177 s64 xaddr;
179 int xflag; 178 int xflag;
180 s32 xlen = max_blocks; 179 s32 xlen = bh_result->b_size >> ip->i_blkbits;
181 180
182 /* 181 /*
183 * Take appropriate lock on inode 182 * Take appropriate lock on inode
@@ -188,7 +187,7 @@ jfs_get_blocks(struct inode *ip, sector_t lblock, unsigned long max_blocks,
188 IREAD_LOCK(ip); 187 IREAD_LOCK(ip);
189 188
190 if (((lblock64 << ip->i_sb->s_blocksize_bits) < ip->i_size) && 189 if (((lblock64 << ip->i_sb->s_blocksize_bits) < ip->i_size) &&
191 (!xtLookup(ip, lblock64, max_blocks, &xflag, &xaddr, &xlen, 0)) && 190 (!xtLookup(ip, lblock64, xlen, &xflag, &xaddr, &xlen, 0)) &&
192 xaddr) { 191 xaddr) {
193 if (xflag & XAD_NOTRECORDED) { 192 if (xflag & XAD_NOTRECORDED) {
194 if (!create) 193 if (!create)
@@ -255,13 +254,6 @@ jfs_get_blocks(struct inode *ip, sector_t lblock, unsigned long max_blocks,
255 return rc; 254 return rc;
256} 255}
257 256
258static int jfs_get_block(struct inode *ip, sector_t lblock,
259 struct buffer_head *bh_result, int create)
260{
261 return jfs_get_blocks(ip, lblock, bh_result->b_size >> ip->i_blkbits,
262 bh_result, create);
263}
264
265static int jfs_writepage(struct page *page, struct writeback_control *wbc) 257static int jfs_writepage(struct page *page, struct writeback_control *wbc)
266{ 258{
267 return nobh_writepage(page, jfs_get_block, wbc); 259 return nobh_writepage(page, jfs_get_block, wbc);
diff --git a/fs/jfs/jfs_inode.h b/fs/jfs/jfs_inode.h
index b5c7da6190d..1fc48df670c 100644
--- a/fs/jfs/jfs_inode.h
+++ b/fs/jfs/jfs_inode.h
@@ -32,6 +32,7 @@ extern void jfs_truncate_nolock(struct inode *, loff_t);
32extern void jfs_free_zero_link(struct inode *); 32extern void jfs_free_zero_link(struct inode *);
33extern struct dentry *jfs_get_parent(struct dentry *dentry); 33extern struct dentry *jfs_get_parent(struct dentry *dentry);
34extern void jfs_set_inode_flags(struct inode *); 34extern void jfs_set_inode_flags(struct inode *);
35extern int jfs_get_block(struct inode *, sector_t, struct buffer_head *, int);
35 36
36extern const struct address_space_operations jfs_aops; 37extern const struct address_space_operations jfs_aops;
37extern struct inode_operations jfs_dir_inode_operations; 38extern struct inode_operations jfs_dir_inode_operations;
diff --git a/fs/jfs/super.c b/fs/jfs/super.c
index 4f6cfebc82d..143bcd1d5ea 100644
--- a/fs/jfs/super.c
+++ b/fs/jfs/super.c
@@ -26,6 +26,7 @@
26#include <linux/moduleparam.h> 26#include <linux/moduleparam.h>
27#include <linux/kthread.h> 27#include <linux/kthread.h>
28#include <linux/posix_acl.h> 28#include <linux/posix_acl.h>
29#include <linux/buffer_head.h>
29#include <asm/uaccess.h> 30#include <asm/uaccess.h>
30#include <linux/seq_file.h> 31#include <linux/seq_file.h>
31 32
@@ -298,7 +299,7 @@ static int parse_options(char *options, struct super_block *sb, s64 *newLVSize,
298 break; 299 break;
299 } 300 }
300 301
301#if defined(CONFIG_QUOTA) 302#ifdef CONFIG_QUOTA
302 case Opt_quota: 303 case Opt_quota:
303 case Opt_usrquota: 304 case Opt_usrquota:
304 *flag |= JFS_USRQUOTA; 305 *flag |= JFS_USRQUOTA;
@@ -597,7 +598,7 @@ static int jfs_show_options(struct seq_file *seq, struct vfsmount *vfs)
597 if (sbi->flag & JFS_NOINTEGRITY) 598 if (sbi->flag & JFS_NOINTEGRITY)
598 seq_puts(seq, ",nointegrity"); 599 seq_puts(seq, ",nointegrity");
599 600
600#if defined(CONFIG_QUOTA) 601#ifdef CONFIG_QUOTA
601 if (sbi->flag & JFS_USRQUOTA) 602 if (sbi->flag & JFS_USRQUOTA)
602 seq_puts(seq, ",usrquota"); 603 seq_puts(seq, ",usrquota");
603 604
@@ -608,6 +609,113 @@ static int jfs_show_options(struct seq_file *seq, struct vfsmount *vfs)
608 return 0; 609 return 0;
609} 610}
610 611
612#ifdef CONFIG_QUOTA
613
614/* Read data from quotafile - avoid pagecache and such because we cannot afford
615 * acquiring the locks... As quota files are never truncated and quota code
616 * itself serializes the operations (and no one else should touch the files)
617 * we don't have to be afraid of races */
618static ssize_t jfs_quota_read(struct super_block *sb, int type, char *data,
619 size_t len, loff_t off)
620{
621 struct inode *inode = sb_dqopt(sb)->files[type];
622 sector_t blk = off >> sb->s_blocksize_bits;
623 int err = 0;
624 int offset = off & (sb->s_blocksize - 1);
625 int tocopy;
626 size_t toread;
627 struct buffer_head tmp_bh;
628 struct buffer_head *bh;
629 loff_t i_size = i_size_read(inode);
630
631 if (off > i_size)
632 return 0;
633 if (off+len > i_size)
634 len = i_size-off;
635 toread = len;
636 while (toread > 0) {
637 tocopy = sb->s_blocksize - offset < toread ?
638 sb->s_blocksize - offset : toread;
639
640 tmp_bh.b_state = 0;
641 tmp_bh.b_size = 1 << inode->i_blkbits;
642 err = jfs_get_block(inode, blk, &tmp_bh, 0);
643 if (err)
644 return err;
645 if (!buffer_mapped(&tmp_bh)) /* A hole? */
646 memset(data, 0, tocopy);
647 else {
648 bh = sb_bread(sb, tmp_bh.b_blocknr);
649 if (!bh)
650 return -EIO;
651 memcpy(data, bh->b_data+offset, tocopy);
652 brelse(bh);
653 }
654 offset = 0;
655 toread -= tocopy;
656 data += tocopy;
657 blk++;
658 }
659 return len;
660}
661
662/* Write to quotafile */
663static ssize_t jfs_quota_write(struct super_block *sb, int type,
664 const char *data, size_t len, loff_t off)
665{
666 struct inode *inode = sb_dqopt(sb)->files[type];
667 sector_t blk = off >> sb->s_blocksize_bits;
668 int err = 0;
669 int offset = off & (sb->s_blocksize - 1);
670 int tocopy;
671 size_t towrite = len;
672 struct buffer_head tmp_bh;
673 struct buffer_head *bh;
674
675 mutex_lock(&inode->i_mutex);
676 while (towrite > 0) {
677 tocopy = sb->s_blocksize - offset < towrite ?
678 sb->s_blocksize - offset : towrite;
679
680 tmp_bh.b_state = 0;
681 tmp_bh.b_size = 1 << inode->i_blkbits;
682 err = jfs_get_block(inode, blk, &tmp_bh, 1);
683 if (err)
684 goto out;
685 if (offset || tocopy != sb->s_blocksize)
686 bh = sb_bread(sb, tmp_bh.b_blocknr);
687 else
688 bh = sb_getblk(sb, tmp_bh.b_blocknr);
689 if (!bh) {
690 err = -EIO;
691 goto out;
692 }
693 lock_buffer(bh);
694 memcpy(bh->b_data+offset, data, tocopy);
695 flush_dcache_page(bh->b_page);
696 set_buffer_uptodate(bh);
697 mark_buffer_dirty(bh);
698 unlock_buffer(bh);
699 brelse(bh);
700 offset = 0;
701 towrite -= tocopy;
702 data += tocopy;
703 blk++;
704 }
705out:
706 if (len == towrite)
707 return err;
708 if (inode->i_size < off+len-towrite)
709 i_size_write(inode, off+len-towrite);
710 inode->i_version++;
711 inode->i_mtime = inode->i_ctime = CURRENT_TIME;
712 mark_inode_dirty(inode);
713 mutex_unlock(&inode->i_mutex);
714 return len - towrite;
715}
716
717#endif
718
611static struct super_operations jfs_super_operations = { 719static struct super_operations jfs_super_operations = {
612 .alloc_inode = jfs_alloc_inode, 720 .alloc_inode = jfs_alloc_inode,
613 .destroy_inode = jfs_destroy_inode, 721 .destroy_inode = jfs_destroy_inode,
@@ -621,7 +729,11 @@ static struct super_operations jfs_super_operations = {
621 .unlockfs = jfs_unlockfs, 729 .unlockfs = jfs_unlockfs,
622 .statfs = jfs_statfs, 730 .statfs = jfs_statfs,
623 .remount_fs = jfs_remount, 731 .remount_fs = jfs_remount,
624 .show_options = jfs_show_options 732 .show_options = jfs_show_options,
733#ifdef CONFIG_QUOTA
734 .quota_read = jfs_quota_read,
735 .quota_write = jfs_quota_write,
736#endif
625}; 737};
626 738
627static struct export_operations jfs_export_operations = { 739static struct export_operations jfs_export_operations = {
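Both quota helpers added above walk their byte range one filesystem block at a time: each pass computes how much of the current block is covered (tocopy), maps the block with jfs_get_block(), copies through a buffer_head, then resets the in-block offset to zero and moves on; the read side additionally clamps the length to i_size and zero-fills holes. A minimal userspace sketch of just that chunking loop, using a flat in-memory buffer instead of buffer_heads (the 4k block size and function name are illustrative only):

    #include <stdio.h>
    #include <string.h>

    #define BLKSIZE 4096  /* stand-in for sb->s_blocksize */

    /* Copy len bytes at byte offset off out of an in-memory "file",
     * block by block, the way jfs_quota_read() carves up its request. */
    static size_t chunked_read(const char *file, size_t file_size,
                               char *data, size_t len, size_t off)
    {
        size_t blk = off / BLKSIZE;
        size_t offset = off % BLKSIZE;
        size_t toread;

        if (off > file_size)
            return 0;
        if (off + len > file_size)
            len = file_size - off;
        toread = len;

        while (toread > 0) {
            size_t tocopy = BLKSIZE - offset < toread ? BLKSIZE - offset : toread;

            /* In the kernel this is where jfs_get_block() + sb_bread()
             * would map and read block 'blk'; here the data is in memory. */
            memcpy(data, file + blk * BLKSIZE + offset, tocopy);

            offset = 0;
            toread -= tocopy;
            data += tocopy;
            blk++;
        }
        return len;
    }

    int main(void)
    {
        static char file[3 * BLKSIZE];
        char out[6000];

        memset(file, 'x', sizeof(file));
        printf("copied %zu bytes\n",
               chunked_read(file, sizeof(file), out, sizeof(out), 4090));
        return 0;
    }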
diff --git a/fs/lockd/svcsubs.c b/fs/lockd/svcsubs.c
index 2a4df9b3779..01b4db9e546 100644
--- a/fs/lockd/svcsubs.c
+++ b/fs/lockd/svcsubs.c
@@ -237,19 +237,22 @@ static int
237nlm_traverse_files(struct nlm_host *host, int action) 237nlm_traverse_files(struct nlm_host *host, int action)
238{ 238{
239 struct nlm_file *file, **fp; 239 struct nlm_file *file, **fp;
240 int i; 240 int i, ret = 0;
241 241
242 mutex_lock(&nlm_file_mutex); 242 mutex_lock(&nlm_file_mutex);
243 for (i = 0; i < FILE_NRHASH; i++) { 243 for (i = 0; i < FILE_NRHASH; i++) {
244 fp = nlm_files + i; 244 fp = nlm_files + i;
245 while ((file = *fp) != NULL) { 245 while ((file = *fp) != NULL) {
246 file->f_count++;
247 mutex_unlock(&nlm_file_mutex);
248
246 /* Traverse locks, blocks and shares of this file 249 /* Traverse locks, blocks and shares of this file
247 * and update file->f_locks count */ 250 * and update file->f_locks count */
248 if (nlm_inspect_file(host, file, action)) { 251 if (nlm_inspect_file(host, file, action))
249 mutex_unlock(&nlm_file_mutex); 252 ret = 1;
250 return 1;
251 }
252 253
254 mutex_lock(&nlm_file_mutex);
255 file->f_count--;
253 /* No more references to this file. Let go of it. */ 256 /* No more references to this file. Let go of it. */
254 if (!file->f_blocks && !file->f_locks 257 if (!file->f_blocks && !file->f_locks
255 && !file->f_shares && !file->f_count) { 258 && !file->f_shares && !file->f_count) {
@@ -262,7 +265,7 @@ nlm_traverse_files(struct nlm_host *host, int action)
262 } 265 }
263 } 266 }
264 mutex_unlock(&nlm_file_mutex); 267 mutex_unlock(&nlm_file_mutex);
265 return 0; 268 return ret;
266} 269}
267 270
268/* 271/*
diff --git a/fs/locks.c b/fs/locks.c
index b0b41a64e10..d7c53392cac 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -1421,8 +1421,9 @@ static int __setlease(struct file *filp, long arg, struct file_lock **flp)
1421 if (!leases_enable) 1421 if (!leases_enable)
1422 goto out; 1422 goto out;
1423 1423
1424 error = lease_alloc(filp, arg, &fl); 1424 error = -ENOMEM;
1425 if (error) 1425 fl = locks_alloc_lock();
1426 if (fl == NULL)
1426 goto out; 1427 goto out;
1427 1428
1428 locks_copy_lock(fl, lease); 1429 locks_copy_lock(fl, lease);
@@ -1430,6 +1431,7 @@ static int __setlease(struct file *filp, long arg, struct file_lock **flp)
1430 locks_insert_lock(before, fl); 1431 locks_insert_lock(before, fl);
1431 1432
1432 *flp = fl; 1433 *flp = fl;
1434 error = 0;
1433out: 1435out:
1434 return error; 1436 return error;
1435} 1437}
diff --git a/fs/minix/inode.c b/fs/minix/inode.c
index 9ea91c5eeb7..330ff9fc7cf 100644
--- a/fs/minix/inode.c
+++ b/fs/minix/inode.c
@@ -204,6 +204,8 @@ static int minix_fill_super(struct super_block *s, void *data, int silent)
204 /* 204 /*
205 * Allocate the buffer map to keep the superblock small. 205 * Allocate the buffer map to keep the superblock small.
206 */ 206 */
207 if (sbi->s_imap_blocks == 0 || sbi->s_zmap_blocks == 0)
208 goto out_illegal_sb;
207 i = (sbi->s_imap_blocks + sbi->s_zmap_blocks) * sizeof(bh); 209 i = (sbi->s_imap_blocks + sbi->s_zmap_blocks) * sizeof(bh);
208 map = kmalloc(i, GFP_KERNEL); 210 map = kmalloc(i, GFP_KERNEL);
209 if (!map) 211 if (!map)
@@ -263,7 +265,7 @@ out_no_root:
263 265
264out_no_bitmap: 266out_no_bitmap:
265 printk("MINIX-fs: bad superblock or unable to read bitmaps\n"); 267 printk("MINIX-fs: bad superblock or unable to read bitmaps\n");
266 out_freemap: 268out_freemap:
267 for (i = 0; i < sbi->s_imap_blocks; i++) 269 for (i = 0; i < sbi->s_imap_blocks; i++)
268 brelse(sbi->s_imap[i]); 270 brelse(sbi->s_imap[i]);
269 for (i = 0; i < sbi->s_zmap_blocks; i++) 271 for (i = 0; i < sbi->s_zmap_blocks; i++)
@@ -276,11 +278,16 @@ out_no_map:
276 printk("MINIX-fs: can't allocate map\n"); 278 printk("MINIX-fs: can't allocate map\n");
277 goto out_release; 279 goto out_release;
278 280
281out_illegal_sb:
282 if (!silent)
283 printk("MINIX-fs: bad superblock\n");
284 goto out_release;
285
279out_no_fs: 286out_no_fs:
280 if (!silent) 287 if (!silent)
281 printk("VFS: Can't find a Minix or Minix V2 filesystem " 288 printk("VFS: Can't find a Minix or Minix V2 filesystem "
282 "on device %s\n", s->s_id); 289 "on device %s\n", s->s_id);
283 out_release: 290out_release:
284 brelse(bh); 291 brelse(bh);
285 goto out; 292 goto out;
286 293
@@ -290,7 +297,7 @@ out_bad_hblock:
290 297
291out_bad_sb: 298out_bad_sb:
292 printk("MINIX-fs: unable to read superblock\n"); 299 printk("MINIX-fs: unable to read superblock\n");
293 out: 300out:
294 s->s_fs_info = NULL; 301 s->s_fs_info = NULL;
295 kfree(sbi); 302 kfree(sbi);
296 return -EINVAL; 303 return -EINVAL;
diff --git a/fs/namei.c b/fs/namei.c
index 55a131230f9..432d6bc6fab 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -227,10 +227,10 @@ int generic_permission(struct inode *inode, int mask,
227 227
228int permission(struct inode *inode, int mask, struct nameidata *nd) 228int permission(struct inode *inode, int mask, struct nameidata *nd)
229{ 229{
230 umode_t mode = inode->i_mode;
230 int retval, submask; 231 int retval, submask;
231 232
232 if (mask & MAY_WRITE) { 233 if (mask & MAY_WRITE) {
233 umode_t mode = inode->i_mode;
234 234
235 /* 235 /*
236 * Nobody gets write access to a read-only fs. 236 * Nobody gets write access to a read-only fs.
@@ -247,6 +247,13 @@ int permission(struct inode *inode, int mask, struct nameidata *nd)
247 } 247 }
248 248
249 249
250 /*
251 * MAY_EXEC on regular files requires special handling: We override
252 * filesystem execute permissions if the mode bits aren't set.
253 */
254 if ((mask & MAY_EXEC) && S_ISREG(mode) && !(mode & S_IXUGO))
255 return -EACCES;
256
250 /* Ordinary permission routines do not understand MAY_APPEND. */ 257 /* Ordinary permission routines do not understand MAY_APPEND. */
251 submask = mask & ~MAY_APPEND; 258 submask = mask & ~MAY_APPEND;
252 if (inode->i_op && inode->i_op->permission) 259 if (inode->i_op && inode->i_op->permission)
@@ -1767,6 +1774,8 @@ struct dentry *lookup_create(struct nameidata *nd, int is_dir)
1767 if (nd->last_type != LAST_NORM) 1774 if (nd->last_type != LAST_NORM)
1768 goto fail; 1775 goto fail;
1769 nd->flags &= ~LOOKUP_PARENT; 1776 nd->flags &= ~LOOKUP_PARENT;
1777 nd->flags |= LOOKUP_CREATE;
1778 nd->intent.open.flags = O_EXCL;
1770 1779
1771 /* 1780 /*
1772 * Do the final lookup. 1781 * Do the final lookup.
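The permission() change above refuses MAY_EXEC outright for a regular file whose mode carries no execute bit at all, before any generic or filesystem-specific permission hook gets a say; that is what lets the duplicate 0111 checks be dropped from open_exec() and prepare_binprm() earlier in this diff. The predicate itself is just S_ISREG plus an execute-bit test; a userspace sketch of the same check against a stat(2) result (the helper name is made up for the example):

    #include <stdio.h>
    #include <sys/stat.h>

    /* Mirrors the new VFS test: a regular file with no execute bit in
     * user, group or other can never satisfy an execute request. */
    static int exec_never_allowed(const struct stat *st)
    {
        return S_ISREG(st->st_mode) &&
               !(st->st_mode & (S_IXUSR | S_IXGRP | S_IXOTH));
    }

    int main(int argc, char **argv)
    {
        struct stat st;

        if (argc < 2 || stat(argv[1], &st) != 0) {
            fprintf(stderr, "usage: %s <path>\n", argv[0]);
            return 1;
        }
        printf("%s: %s\n", argv[1],
               exec_never_allowed(&st) ? "exec always denied"
                                       : "exec subject to normal checks");
        return 0;
    }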
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index cc2b874ad5a..48e892880d5 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -312,7 +312,13 @@ static void nfs_invalidate_page(struct page *page, unsigned long offset)
312 312
313static int nfs_release_page(struct page *page, gfp_t gfp) 313static int nfs_release_page(struct page *page, gfp_t gfp)
314{ 314{
315 return !nfs_wb_page(page->mapping->host, page); 315 if (gfp & __GFP_FS)
316 return !nfs_wb_page(page->mapping->host, page);
317 else
318 /*
319 * Avoid deadlock on nfs_wait_on_request().
320 */
321 return 0;
316} 322}
317 323
318const struct address_space_operations nfs_file_aops = { 324const struct address_space_operations nfs_file_aops = {
diff --git a/fs/nfs/idmap.c b/fs/nfs/idmap.c
index b81e7ed3c90..07a5dd57646 100644
--- a/fs/nfs/idmap.c
+++ b/fs/nfs/idmap.c
@@ -130,9 +130,7 @@ nfs_idmap_delete(struct nfs4_client *clp)
130 130
131 if (!idmap) 131 if (!idmap)
132 return; 132 return;
133 dput(idmap->idmap_dentry); 133 rpc_unlink(idmap->idmap_dentry);
134 idmap->idmap_dentry = NULL;
135 rpc_unlink(idmap->idmap_path);
136 clp->cl_idmap = NULL; 134 clp->cl_idmap = NULL;
137 kfree(idmap); 135 kfree(idmap);
138} 136}
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index e6ee97f19d8..153898e1331 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -2668,7 +2668,7 @@ out:
2668 nfs4_set_cached_acl(inode, acl); 2668 nfs4_set_cached_acl(inode, acl);
2669} 2669}
2670 2670
2671static inline ssize_t nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen) 2671static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
2672{ 2672{
2673 struct page *pages[NFS4ACL_MAXPAGES]; 2673 struct page *pages[NFS4ACL_MAXPAGES];
2674 struct nfs_getaclargs args = { 2674 struct nfs_getaclargs args = {
@@ -2721,6 +2721,19 @@ out_free:
2721 return ret; 2721 return ret;
2722} 2722}
2723 2723
2724static ssize_t nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
2725{
2726 struct nfs4_exception exception = { };
2727 ssize_t ret;
2728 do {
2729 ret = __nfs4_get_acl_uncached(inode, buf, buflen);
2730 if (ret >= 0)
2731 break;
2732 ret = nfs4_handle_exception(NFS_SERVER(inode), ret, &exception);
2733 } while (exception.retry);
2734 return ret;
2735}
2736
2724static ssize_t nfs4_proc_get_acl(struct inode *inode, void *buf, size_t buflen) 2737static ssize_t nfs4_proc_get_acl(struct inode *inode, void *buf, size_t buflen)
2725{ 2738{
2726 struct nfs_server *server = NFS_SERVER(inode); 2739 struct nfs_server *server = NFS_SERVER(inode);
@@ -2737,7 +2750,7 @@ static ssize_t nfs4_proc_get_acl(struct inode *inode, void *buf, size_t buflen)
2737 return nfs4_get_acl_uncached(inode, buf, buflen); 2750 return nfs4_get_acl_uncached(inode, buf, buflen);
2738} 2751}
2739 2752
2740static int nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen) 2753static int __nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen)
2741{ 2754{
2742 struct nfs_server *server = NFS_SERVER(inode); 2755 struct nfs_server *server = NFS_SERVER(inode);
2743 struct page *pages[NFS4ACL_MAXPAGES]; 2756 struct page *pages[NFS4ACL_MAXPAGES];
@@ -2763,6 +2776,18 @@ static int nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen
2763 return ret; 2776 return ret;
2764} 2777}
2765 2778
2779static int nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen)
2780{
2781 struct nfs4_exception exception = { };
2782 int err;
2783 do {
2784 err = nfs4_handle_exception(NFS_SERVER(inode),
2785 __nfs4_proc_set_acl(inode, buf, buflen),
2786 &exception);
2787 } while (exception.retry);
2788 return err;
2789}
2790
2766static int 2791static int
2767nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server) 2792nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server)
2768{ 2793{
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index 1750d996f49..730ec8fb31c 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -3355,7 +3355,7 @@ static int decode_readdir(struct xdr_stream *xdr, struct rpc_rqst *req, struct n
3355 struct kvec *iov = rcvbuf->head; 3355 struct kvec *iov = rcvbuf->head;
3356 unsigned int nr, pglen = rcvbuf->page_len; 3356 unsigned int nr, pglen = rcvbuf->page_len;
3357 uint32_t *end, *entry, *p, *kaddr; 3357 uint32_t *end, *entry, *p, *kaddr;
3358 uint32_t len, attrlen; 3358 uint32_t len, attrlen, xlen;
3359 int hdrlen, recvd, status; 3359 int hdrlen, recvd, status;
3360 3360
3361 status = decode_op_hdr(xdr, OP_READDIR); 3361 status = decode_op_hdr(xdr, OP_READDIR);
@@ -3377,10 +3377,10 @@ static int decode_readdir(struct xdr_stream *xdr, struct rpc_rqst *req, struct n
3377 3377
3378 BUG_ON(pglen + readdir->pgbase > PAGE_CACHE_SIZE); 3378 BUG_ON(pglen + readdir->pgbase > PAGE_CACHE_SIZE);
3379 kaddr = p = (uint32_t *) kmap_atomic(page, KM_USER0); 3379 kaddr = p = (uint32_t *) kmap_atomic(page, KM_USER0);
3380 end = (uint32_t *) ((char *)p + pglen + readdir->pgbase); 3380 end = p + ((pglen + readdir->pgbase) >> 2);
3381 entry = p; 3381 entry = p;
3382 for (nr = 0; *p++; nr++) { 3382 for (nr = 0; *p++; nr++) {
3383 if (p + 3 > end) 3383 if (end - p < 3)
3384 goto short_pkt; 3384 goto short_pkt;
3385 dprintk("cookie = %Lu, ", *((unsigned long long *)p)); 3385 dprintk("cookie = %Lu, ", *((unsigned long long *)p));
3386 p += 2; /* cookie */ 3386 p += 2; /* cookie */
@@ -3389,18 +3389,19 @@ static int decode_readdir(struct xdr_stream *xdr, struct rpc_rqst *req, struct n
3389 printk(KERN_WARNING "NFS: giant filename in readdir (len 0x%x)\n", len); 3389 printk(KERN_WARNING "NFS: giant filename in readdir (len 0x%x)\n", len);
3390 goto err_unmap; 3390 goto err_unmap;
3391 } 3391 }
3392 dprintk("filename = %*s\n", len, (char *)p); 3392 xlen = XDR_QUADLEN(len);
3393 p += XDR_QUADLEN(len); 3393 if (end - p < xlen + 1)
3394 if (p + 1 > end)
3395 goto short_pkt; 3394 goto short_pkt;
3395 dprintk("filename = %*s\n", len, (char *)p);
3396 p += xlen;
3396 len = ntohl(*p++); /* bitmap length */ 3397 len = ntohl(*p++); /* bitmap length */
3397 p += len; 3398 if (end - p < len + 1)
3398 if (p + 1 > end)
3399 goto short_pkt; 3399 goto short_pkt;
3400 p += len;
3400 attrlen = XDR_QUADLEN(ntohl(*p++)); 3401 attrlen = XDR_QUADLEN(ntohl(*p++));
3401 p += attrlen; /* attributes */ 3402 if (end - p < attrlen + 2)
3402 if (p + 2 > end)
3403 goto short_pkt; 3403 goto short_pkt;
3404 p += attrlen; /* attributes */
3404 entry = p; 3405 entry = p;
3405 } 3406 }
3406 if (!nr && (entry[0] != 0 || entry[1] == 0)) 3407 if (!nr && (entry[0] != 0 || entry[1] == 0))
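The readdir decode rework above changes every length test from the form p + n > end, evaluated after p has already been advanced by an untrusted length, to end - p < n evaluated before consuming anything; the subtraction form never builds a pointer past end and cannot be defeated by a length large enough to wrap the addition. A small illustration of the safe pattern (the helper name is invented for this sketch):

    #include <stdint.h>
    #include <stdio.h>

    /* Nonzero if at least n 32-bit words remain in [p, end).  Written as
     * a subtraction so no pointer beyond 'end' is ever formed. */
    static int words_left(const uint32_t *p, const uint32_t *end, size_t n)
    {
        return p <= end && (size_t)(end - p) >= n;
    }

    int main(void)
    {
        uint32_t buf[8];
        const uint32_t *p = buf + 6;
        const uint32_t *end = buf + 8;

        printf("need 2 words: %d\n", words_left(p, end, 2));  /* 1: enough */
        printf("need 3 words: %d\n", words_left(p, end, 3));  /* 0: short packet */
        return 0;
    }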
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index 65c0c5b3235..da9cf11c326 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -116,10 +116,17 @@ static void nfs_readpage_truncate_uninitialised_page(struct nfs_read_data *data)
116 pages = &data->args.pages[base >> PAGE_CACHE_SHIFT]; 116 pages = &data->args.pages[base >> PAGE_CACHE_SHIFT];
117 base &= ~PAGE_CACHE_MASK; 117 base &= ~PAGE_CACHE_MASK;
118 pglen = PAGE_CACHE_SIZE - base; 118 pglen = PAGE_CACHE_SIZE - base;
119 if (pglen < remainder) 119 for (;;) {
120 if (remainder <= pglen) {
121 memclear_highpage_flush(*pages, base, remainder);
122 break;
123 }
120 memclear_highpage_flush(*pages, base, pglen); 124 memclear_highpage_flush(*pages, base, pglen);
121 else 125 pages++;
122 memclear_highpage_flush(*pages, base, remainder); 126 remainder -= pglen;
127 pglen = PAGE_CACHE_SIZE;
128 base = 0;
129 }
123} 130}
124 131
125/* 132/*
@@ -476,6 +483,8 @@ static void nfs_readpage_set_pages_uptodate(struct nfs_read_data *data)
476 unsigned int base = data->args.pgbase; 483 unsigned int base = data->args.pgbase;
477 struct page **pages; 484 struct page **pages;
478 485
486 if (data->res.eof)
487 count = data->args.count;
479 if (unlikely(count == 0)) 488 if (unlikely(count == 0))
480 return; 489 return;
481 pages = &data->args.pages[base >> PAGE_CACHE_SHIFT]; 490 pages = &data->args.pages[base >> PAGE_CACHE_SHIFT];
@@ -483,11 +492,7 @@ static void nfs_readpage_set_pages_uptodate(struct nfs_read_data *data)
483 count += base; 492 count += base;
484 for (;count >= PAGE_CACHE_SIZE; count -= PAGE_CACHE_SIZE, pages++) 493 for (;count >= PAGE_CACHE_SIZE; count -= PAGE_CACHE_SIZE, pages++)
485 SetPageUptodate(*pages); 494 SetPageUptodate(*pages);
486 /* 495 if (count != 0)
487 * Was this an eof or a short read? If the latter, don't mark the page
488 * as uptodate yet.
489 */
490 if (count > 0 && (data->res.eof || data->args.count == data->res.count))
491 SetPageUptodate(*pages); 496 SetPageUptodate(*pages);
492} 497}
493 498
@@ -502,6 +507,8 @@ static void nfs_readpage_set_pages_error(struct nfs_read_data *data)
502 count += base; 507 count += base;
503 for (;count >= PAGE_CACHE_SIZE; count -= PAGE_CACHE_SIZE, pages++) 508 for (;count >= PAGE_CACHE_SIZE; count -= PAGE_CACHE_SIZE, pages++)
504 SetPageError(*pages); 509 SetPageError(*pages);
510 if (count != 0)
511 SetPageError(*pages);
505} 512}
506 513
507/* 514/*
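The truncate helper above previously cleared at most one page of the short-read region; the new for (;;) loop keeps zeroing whole pages until the remainder fits inside the current one, so an uninitialised tail spanning several pages is cleared completely. A flat-buffer sketch of the same loop shape (the 4k page size and names are chosen for the example):

    #include <stdio.h>
    #include <string.h>

    #define PGSIZE 4096  /* stand-in for PAGE_CACHE_SIZE */

    /* Zero 'remainder' bytes starting 'base' bytes into page 'page',
     * walking forward one page at a time like the reworked helper. */
    static void clear_tail(char *buf, size_t page, size_t base, size_t remainder)
    {
        size_t pglen = PGSIZE - base;

        for (;;) {
            if (remainder <= pglen) {
                memset(buf + page * PGSIZE + base, 0, remainder);
                break;
            }
            memset(buf + page * PGSIZE + base, 0, pglen);
            page++;
            remainder -= pglen;
            pglen = PGSIZE;
            base = 0;
        }
    }

    int main(void)
    {
        static char buf[4 * PGSIZE];

        memset(buf, 'x', sizeof(buf));
        clear_tail(buf, 1, 100, 2 * PGSIZE);  /* tail spans pages 1..3 */
        printf("last cleared byte: %d\n", buf[3 * PGSIZE + 99]);   /* 0 */
        printf("first kept byte:   %d\n", buf[3 * PGSIZE + 100]);  /* 120 ('x') */
        return 0;
    }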
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
index 1b8346dd057..9503240ef0e 100644
--- a/fs/ocfs2/dlm/dlmmaster.c
+++ b/fs/ocfs2/dlm/dlmmaster.c
@@ -2375,7 +2375,6 @@ leave:
2375 mlog(0, "returning %d\n", ret); 2375 mlog(0, "returning %d\n", ret);
2376 return ret; 2376 return ret;
2377} 2377}
2378EXPORT_SYMBOL_GPL(dlm_migrate_lockres);
2379 2378
2380int dlm_lock_basts_flushed(struct dlm_ctxt *dlm, struct dlm_lock *lock) 2379int dlm_lock_basts_flushed(struct dlm_ctxt *dlm, struct dlm_lock *lock)
2381{ 2380{
diff --git a/fs/ocfs2/dlm/dlmunlock.c b/fs/ocfs2/dlm/dlmunlock.c
index b0c3134f4f7..37be4b2e0d4 100644
--- a/fs/ocfs2/dlm/dlmunlock.c
+++ b/fs/ocfs2/dlm/dlmunlock.c
@@ -155,7 +155,7 @@ static enum dlm_status dlmunlock_common(struct dlm_ctxt *dlm,
155 else 155 else
156 status = dlm_get_unlock_actions(dlm, res, lock, lksb, &actions); 156 status = dlm_get_unlock_actions(dlm, res, lock, lksb, &actions);
157 157
158 if (status != DLM_NORMAL) 158 if (status != DLM_NORMAL && (status != DLM_CANCELGRANT || !master_node))
159 goto leave; 159 goto leave;
160 160
161 /* By now this has been masked out of cancel requests. */ 161 /* By now this has been masked out of cancel requests. */
@@ -183,8 +183,7 @@ static enum dlm_status dlmunlock_common(struct dlm_ctxt *dlm,
183 spin_lock(&lock->spinlock); 183 spin_lock(&lock->spinlock);
184 /* if the master told us the lock was already granted, 184 /* if the master told us the lock was already granted,
185 * let the ast handle all of these actions */ 185 * let the ast handle all of these actions */
186 if (status == DLM_NORMAL && 186 if (status == DLM_CANCELGRANT) {
187 lksb->status == DLM_CANCELGRANT) {
188 actions &= ~(DLM_UNLOCK_REMOVE_LOCK| 187 actions &= ~(DLM_UNLOCK_REMOVE_LOCK|
189 DLM_UNLOCK_REGRANT_LOCK| 188 DLM_UNLOCK_REGRANT_LOCK|
190 DLM_UNLOCK_CLEAR_CONVERT_TYPE); 189 DLM_UNLOCK_CLEAR_CONVERT_TYPE);
@@ -349,14 +348,9 @@ static enum dlm_status dlm_send_remote_unlock_request(struct dlm_ctxt *dlm,
349 vec, veclen, owner, &status); 348 vec, veclen, owner, &status);
350 if (tmpret >= 0) { 349 if (tmpret >= 0) {
351 // successfully sent and received 350 // successfully sent and received
352 if (status == DLM_CANCELGRANT) 351 if (status == DLM_FORWARD)
353 ret = DLM_NORMAL;
354 else if (status == DLM_FORWARD) {
355 mlog(0, "master was in-progress. retry\n"); 352 mlog(0, "master was in-progress. retry\n");
356 ret = DLM_FORWARD; 353 ret = status;
357 } else
358 ret = status;
359 lksb->status = status;
360 } else { 354 } else {
361 mlog_errno(tmpret); 355 mlog_errno(tmpret);
362 if (dlm_is_host_down(tmpret)) { 356 if (dlm_is_host_down(tmpret)) {
@@ -372,7 +366,6 @@ static enum dlm_status dlm_send_remote_unlock_request(struct dlm_ctxt *dlm,
372 /* something bad. this will BUG in ocfs2 */ 366 /* something bad. this will BUG in ocfs2 */
373 ret = dlm_err_to_dlm_status(tmpret); 367 ret = dlm_err_to_dlm_status(tmpret);
374 } 368 }
375 lksb->status = ret;
376 } 369 }
377 370
378 return ret; 371 return ret;
@@ -483,6 +476,10 @@ int dlm_unlock_lock_handler(struct o2net_msg *msg, u32 len, void *data)
483 476
484 /* lock was found on queue */ 477 /* lock was found on queue */
485 lksb = lock->lksb; 478 lksb = lock->lksb;
479 if (flags & (LKM_VALBLK|LKM_PUT_LVB) &&
480 lock->ml.type != LKM_EXMODE)
481 flags &= ~(LKM_VALBLK|LKM_PUT_LVB);
482
486 /* unlockast only called on originating node */ 483 /* unlockast only called on originating node */
487 if (flags & LKM_PUT_LVB) { 484 if (flags & LKM_PUT_LVB) {
488 lksb->flags |= DLM_LKSB_PUT_LVB; 485 lksb->flags |= DLM_LKSB_PUT_LVB;
@@ -507,11 +504,8 @@ not_found:
507 "cookie=%u:%llu\n", 504 "cookie=%u:%llu\n",
508 dlm_get_lock_cookie_node(unlock->cookie), 505 dlm_get_lock_cookie_node(unlock->cookie),
509 dlm_get_lock_cookie_seq(unlock->cookie)); 506 dlm_get_lock_cookie_seq(unlock->cookie));
510 else { 507 else
511 /* send the lksb->status back to the other node */
512 status = lksb->status;
513 dlm_lock_put(lock); 508 dlm_lock_put(lock);
514 }
515 509
516leave: 510leave:
517 if (res) 511 if (res)
@@ -533,26 +527,22 @@ static enum dlm_status dlm_get_cancel_actions(struct dlm_ctxt *dlm,
533 527
534 if (dlm_lock_on_list(&res->blocked, lock)) { 528 if (dlm_lock_on_list(&res->blocked, lock)) {
535 /* cancel this outright */ 529 /* cancel this outright */
536 lksb->status = DLM_NORMAL;
537 status = DLM_NORMAL; 530 status = DLM_NORMAL;
538 *actions = (DLM_UNLOCK_CALL_AST | 531 *actions = (DLM_UNLOCK_CALL_AST |
539 DLM_UNLOCK_REMOVE_LOCK); 532 DLM_UNLOCK_REMOVE_LOCK);
540 } else if (dlm_lock_on_list(&res->converting, lock)) { 533 } else if (dlm_lock_on_list(&res->converting, lock)) {
541 /* cancel the request, put back on granted */ 534 /* cancel the request, put back on granted */
542 lksb->status = DLM_NORMAL;
543 status = DLM_NORMAL; 535 status = DLM_NORMAL;
544 *actions = (DLM_UNLOCK_CALL_AST | 536 *actions = (DLM_UNLOCK_CALL_AST |
545 DLM_UNLOCK_REMOVE_LOCK | 537 DLM_UNLOCK_REMOVE_LOCK |
546 DLM_UNLOCK_REGRANT_LOCK | 538 DLM_UNLOCK_REGRANT_LOCK |
547 DLM_UNLOCK_CLEAR_CONVERT_TYPE); 539 DLM_UNLOCK_CLEAR_CONVERT_TYPE);
548 } else if (dlm_lock_on_list(&res->granted, lock)) { 540 } else if (dlm_lock_on_list(&res->granted, lock)) {
549 /* too late, already granted. DLM_CANCELGRANT */ 541 /* too late, already granted. */
550 lksb->status = DLM_CANCELGRANT; 542 status = DLM_CANCELGRANT;
551 status = DLM_NORMAL;
552 *actions = DLM_UNLOCK_CALL_AST; 543 *actions = DLM_UNLOCK_CALL_AST;
553 } else { 544 } else {
554 mlog(ML_ERROR, "lock to cancel is not on any list!\n"); 545 mlog(ML_ERROR, "lock to cancel is not on any list!\n");
555 lksb->status = DLM_IVLOCKID;
556 status = DLM_IVLOCKID; 546 status = DLM_IVLOCKID;
557 *actions = 0; 547 *actions = 0;
558 } 548 }
@@ -569,13 +559,11 @@ static enum dlm_status dlm_get_unlock_actions(struct dlm_ctxt *dlm,
569 559
570 /* unlock request */ 560 /* unlock request */
571 if (!dlm_lock_on_list(&res->granted, lock)) { 561 if (!dlm_lock_on_list(&res->granted, lock)) {
572 lksb->status = DLM_DENIED;
573 status = DLM_DENIED; 562 status = DLM_DENIED;
574 dlm_error(status); 563 dlm_error(status);
575 *actions = 0; 564 *actions = 0;
576 } else { 565 } else {
577 /* unlock granted lock */ 566 /* unlock granted lock */
578 lksb->status = DLM_NORMAL;
579 status = DLM_NORMAL; 567 status = DLM_NORMAL;
580 *actions = (DLM_UNLOCK_FREE_LOCK | 568 *actions = (DLM_UNLOCK_FREE_LOCK |
581 DLM_UNLOCK_CALL_AST | 569 DLM_UNLOCK_CALL_AST |
@@ -632,6 +620,8 @@ retry:
632 620
633 spin_lock(&res->spinlock); 621 spin_lock(&res->spinlock);
634 is_master = (res->owner == dlm->node_num); 622 is_master = (res->owner == dlm->node_num);
623 if (flags & LKM_VALBLK && lock->ml.type != LKM_EXMODE)
624 flags &= ~LKM_VALBLK;
635 spin_unlock(&res->spinlock); 625 spin_unlock(&res->spinlock);
636 626
637 if (is_master) { 627 if (is_master) {
@@ -665,7 +655,7 @@ retry:
665 } 655 }
666 656
667 if (call_ast) { 657 if (call_ast) {
668 mlog(0, "calling unlockast(%p, %d)\n", data, lksb->status); 658 mlog(0, "calling unlockast(%p, %d)\n", data, status);
669 if (is_master) { 659 if (is_master) {
670 /* it is possible that there is one last bast 660 /* it is possible that there is one last bast
671 * pending. make sure it is flushed, then 661 * pending. make sure it is flushed, then
@@ -677,9 +667,12 @@ retry:
677 wait_event(dlm->ast_wq, 667 wait_event(dlm->ast_wq,
678 dlm_lock_basts_flushed(dlm, lock)); 668 dlm_lock_basts_flushed(dlm, lock));
679 } 669 }
680 (*unlockast)(data, lksb->status); 670 (*unlockast)(data, status);
681 } 671 }
682 672
673 if (status == DLM_CANCELGRANT)
674 status = DLM_NORMAL;
675
683 if (status == DLM_NORMAL) { 676 if (status == DLM_NORMAL) {
684 mlog(0, "kicking the thread\n"); 677 mlog(0, "kicking the thread\n");
685 dlm_kick_thread(dlm, res); 678 dlm_kick_thread(dlm, res);
diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
index 0d1973ea32b..1f17a4d0828 100644
--- a/fs/ocfs2/localalloc.c
+++ b/fs/ocfs2/localalloc.c
@@ -840,6 +840,12 @@ static int ocfs2_local_alloc_new_window(struct ocfs2_super *osb,
840 840
841 mlog(0, "Allocating %u clusters for a new window.\n", 841 mlog(0, "Allocating %u clusters for a new window.\n",
842 ocfs2_local_alloc_window_bits(osb)); 842 ocfs2_local_alloc_window_bits(osb));
843
844 /* Instruct the allocation code to try the most recently used
845 * cluster group. We'll re-record the group used this pass
846 * below. */
847 ac->ac_last_group = osb->la_last_gd;
848
843 /* we used the generic suballoc reserve function, but we set 849 /* we used the generic suballoc reserve function, but we set
844 * everything up nicely, so there's no reason why we can't use 850 * everything up nicely, so there's no reason why we can't use
845 * the more specific cluster api to claim bits. */ 851 * the more specific cluster api to claim bits. */
@@ -852,6 +858,8 @@ static int ocfs2_local_alloc_new_window(struct ocfs2_super *osb,
852 goto bail; 858 goto bail;
853 } 859 }
854 860
861 osb->la_last_gd = ac->ac_last_group;
862
855 la->la_bm_off = cpu_to_le32(cluster_off); 863 la->la_bm_off = cpu_to_le32(cluster_off);
856 alloc->id1.bitmap1.i_total = cpu_to_le32(cluster_count); 864 alloc->id1.bitmap1.i_total = cpu_to_le32(cluster_count);
857 /* just in case... In the future when we find space ourselves, 865 /* just in case... In the future when we find space ourselves,
diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
index cd4a6f253d1..0462a7f4e21 100644
--- a/fs/ocfs2/ocfs2.h
+++ b/fs/ocfs2/ocfs2.h
@@ -197,7 +197,6 @@ struct ocfs2_super
197 struct ocfs2_node_map recovery_map; 197 struct ocfs2_node_map recovery_map;
198 struct ocfs2_node_map umount_map; 198 struct ocfs2_node_map umount_map;
199 199
200 u32 num_clusters;
201 u64 root_blkno; 200 u64 root_blkno;
202 u64 system_dir_blkno; 201 u64 system_dir_blkno;
203 u64 bitmap_blkno; 202 u64 bitmap_blkno;
@@ -237,6 +236,7 @@ struct ocfs2_super
237 236
238 enum ocfs2_local_alloc_state local_alloc_state; 237 enum ocfs2_local_alloc_state local_alloc_state;
239 struct buffer_head *local_alloc_bh; 238 struct buffer_head *local_alloc_bh;
239 u64 la_last_gd;
240 240
241 /* Next two fields are for local node slot recovery during 241 /* Next two fields are for local node slot recovery during
242 * mount. */ 242 * mount. */
diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
index 195523090c8..9d91e66f51a 100644
--- a/fs/ocfs2/suballoc.c
+++ b/fs/ocfs2/suballoc.c
@@ -70,12 +70,6 @@ static int ocfs2_block_group_search(struct inode *inode,
70 struct buffer_head *group_bh, 70 struct buffer_head *group_bh,
71 u32 bits_wanted, u32 min_bits, 71 u32 bits_wanted, u32 min_bits,
72 u16 *bit_off, u16 *bits_found); 72 u16 *bit_off, u16 *bits_found);
73static int ocfs2_search_chain(struct ocfs2_alloc_context *ac,
74 u32 bits_wanted,
75 u32 min_bits,
76 u16 *bit_off,
77 unsigned int *num_bits,
78 u64 *bg_blkno);
79static int ocfs2_claim_suballoc_bits(struct ocfs2_super *osb, 73static int ocfs2_claim_suballoc_bits(struct ocfs2_super *osb,
80 struct ocfs2_alloc_context *ac, 74 struct ocfs2_alloc_context *ac,
81 u32 bits_wanted, 75 u32 bits_wanted,
@@ -85,11 +79,6 @@ static int ocfs2_claim_suballoc_bits(struct ocfs2_super *osb,
85 u64 *bg_blkno); 79 u64 *bg_blkno);
86static int ocfs2_test_bg_bit_allocatable(struct buffer_head *bg_bh, 80static int ocfs2_test_bg_bit_allocatable(struct buffer_head *bg_bh,
87 int nr); 81 int nr);
88static int ocfs2_block_group_find_clear_bits(struct ocfs2_super *osb,
89 struct buffer_head *bg_bh,
90 unsigned int bits_wanted,
91 u16 *bit_off,
92 u16 *bits_found);
93static inline int ocfs2_block_group_set_bits(struct ocfs2_journal_handle *handle, 82static inline int ocfs2_block_group_set_bits(struct ocfs2_journal_handle *handle,
94 struct inode *alloc_inode, 83 struct inode *alloc_inode,
95 struct ocfs2_group_desc *bg, 84 struct ocfs2_group_desc *bg,
@@ -143,6 +132,64 @@ static u32 ocfs2_bits_per_group(struct ocfs2_chain_list *cl)
143 return (u32)le16_to_cpu(cl->cl_cpg) * (u32)le16_to_cpu(cl->cl_bpc); 132 return (u32)le16_to_cpu(cl->cl_cpg) * (u32)le16_to_cpu(cl->cl_bpc);
144} 133}
145 134
135/* somewhat more expensive than our other checks, so use sparingly. */
136static int ocfs2_check_group_descriptor(struct super_block *sb,
137 struct ocfs2_dinode *di,
138 struct ocfs2_group_desc *gd)
139{
140 unsigned int max_bits;
141
142 if (!OCFS2_IS_VALID_GROUP_DESC(gd)) {
143 OCFS2_RO_ON_INVALID_GROUP_DESC(sb, gd);
144 return -EIO;
145 }
146
147 if (di->i_blkno != gd->bg_parent_dinode) {
148 ocfs2_error(sb, "Group descriptor # %llu has bad parent "
149 "pointer (%llu, expected %llu)",
150 (unsigned long long)le64_to_cpu(gd->bg_blkno),
151 (unsigned long long)le64_to_cpu(gd->bg_parent_dinode),
152 (unsigned long long)le64_to_cpu(di->i_blkno));
153 return -EIO;
154 }
155
156 max_bits = le16_to_cpu(di->id2.i_chain.cl_cpg) * le16_to_cpu(di->id2.i_chain.cl_bpc);
157 if (le16_to_cpu(gd->bg_bits) > max_bits) {
158 ocfs2_error(sb, "Group descriptor # %llu has bit count of %u",
159 (unsigned long long)le64_to_cpu(gd->bg_blkno),
160 le16_to_cpu(gd->bg_bits));
161 return -EIO;
162 }
163
164 if (le16_to_cpu(gd->bg_chain) >=
165 le16_to_cpu(di->id2.i_chain.cl_next_free_rec)) {
166 ocfs2_error(sb, "Group descriptor # %llu has bad chain %u",
167 (unsigned long long)le64_to_cpu(gd->bg_blkno),
168 le16_to_cpu(gd->bg_chain));
169 return -EIO;
170 }
171
172 if (le16_to_cpu(gd->bg_free_bits_count) > le16_to_cpu(gd->bg_bits)) {
173 ocfs2_error(sb, "Group descriptor # %llu has bit count %u but "
174 "claims that %u are free",
175 (unsigned long long)le64_to_cpu(gd->bg_blkno),
176 le16_to_cpu(gd->bg_bits),
177 le16_to_cpu(gd->bg_free_bits_count));
178 return -EIO;
179 }
180
181 if (le16_to_cpu(gd->bg_bits) > (8 * le16_to_cpu(gd->bg_size))) {
182 ocfs2_error(sb, "Group descriptor # %llu has bit count %u but "
183 "max bitmap bits of %u",
184 (unsigned long long)le64_to_cpu(gd->bg_blkno),
185 le16_to_cpu(gd->bg_bits),
186 8 * le16_to_cpu(gd->bg_size));
187 return -EIO;
188 }
189
190 return 0;
191}
192
146static int ocfs2_block_group_fill(struct ocfs2_journal_handle *handle, 193static int ocfs2_block_group_fill(struct ocfs2_journal_handle *handle,
147 struct inode *alloc_inode, 194 struct inode *alloc_inode,
148 struct buffer_head *bg_bh, 195 struct buffer_head *bg_bh,
@@ -663,6 +710,7 @@ static int ocfs2_test_bg_bit_allocatable(struct buffer_head *bg_bh,
663static int ocfs2_block_group_find_clear_bits(struct ocfs2_super *osb, 710static int ocfs2_block_group_find_clear_bits(struct ocfs2_super *osb,
664 struct buffer_head *bg_bh, 711 struct buffer_head *bg_bh,
665 unsigned int bits_wanted, 712 unsigned int bits_wanted,
713 unsigned int total_bits,
666 u16 *bit_off, 714 u16 *bit_off,
667 u16 *bits_found) 715 u16 *bits_found)
668{ 716{
@@ -679,10 +727,8 @@ static int ocfs2_block_group_find_clear_bits(struct ocfs2_super *osb,
679 found = start = best_offset = best_size = 0; 727 found = start = best_offset = best_size = 0;
680 bitmap = bg->bg_bitmap; 728 bitmap = bg->bg_bitmap;
681 729
682 while((offset = ocfs2_find_next_zero_bit(bitmap, 730 while((offset = ocfs2_find_next_zero_bit(bitmap, total_bits, start)) != -1) {
683 le16_to_cpu(bg->bg_bits), 731 if (offset == total_bits)
684 start)) != -1) {
685 if (offset == le16_to_cpu(bg->bg_bits))
686 break; 732 break;
687 733
688 if (!ocfs2_test_bg_bit_allocatable(bg_bh, offset)) { 734 if (!ocfs2_test_bg_bit_allocatable(bg_bh, offset)) {
@@ -911,14 +957,35 @@ static int ocfs2_cluster_group_search(struct inode *inode,
911{ 957{
912 int search = -ENOSPC; 958 int search = -ENOSPC;
913 int ret; 959 int ret;
914 struct ocfs2_group_desc *bg = (struct ocfs2_group_desc *) group_bh->b_data; 960 struct ocfs2_group_desc *gd = (struct ocfs2_group_desc *) group_bh->b_data;
915 u16 tmp_off, tmp_found; 961 u16 tmp_off, tmp_found;
962 unsigned int max_bits, gd_cluster_off;
916 963
917 BUG_ON(!ocfs2_is_cluster_bitmap(inode)); 964 BUG_ON(!ocfs2_is_cluster_bitmap(inode));
918 965
919 if (bg->bg_free_bits_count) { 966 if (gd->bg_free_bits_count) {
967 max_bits = le16_to_cpu(gd->bg_bits);
968
969 /* Tail groups in cluster bitmaps which aren't cpg
 970 * aligned are prone to partial extension by a failed
971 * fs resize. If the file system resize never got to
972 * update the dinode cluster count, then we don't want
973 * to trust any clusters past it, regardless of what
974 * the group descriptor says. */
975 gd_cluster_off = ocfs2_blocks_to_clusters(inode->i_sb,
976 le64_to_cpu(gd->bg_blkno));
977 if ((gd_cluster_off + max_bits) >
978 OCFS2_I(inode)->ip_clusters) {
979 max_bits = OCFS2_I(inode)->ip_clusters - gd_cluster_off;
980 mlog(0, "Desc %llu, bg_bits %u, clusters %u, use %u\n",
981 (unsigned long long)le64_to_cpu(gd->bg_blkno),
982 le16_to_cpu(gd->bg_bits),
983 OCFS2_I(inode)->ip_clusters, max_bits);
984 }
985
920 ret = ocfs2_block_group_find_clear_bits(OCFS2_SB(inode->i_sb), 986 ret = ocfs2_block_group_find_clear_bits(OCFS2_SB(inode->i_sb),
921 group_bh, bits_wanted, 987 group_bh, bits_wanted,
988 max_bits,
922 &tmp_off, &tmp_found); 989 &tmp_off, &tmp_found);
923 if (ret) 990 if (ret)
924 return ret; 991 return ret;
@@ -951,17 +1018,109 @@ static int ocfs2_block_group_search(struct inode *inode,
951 if (bg->bg_free_bits_count) 1018 if (bg->bg_free_bits_count)
952 ret = ocfs2_block_group_find_clear_bits(OCFS2_SB(inode->i_sb), 1019 ret = ocfs2_block_group_find_clear_bits(OCFS2_SB(inode->i_sb),
953 group_bh, bits_wanted, 1020 group_bh, bits_wanted,
1021 le16_to_cpu(bg->bg_bits),
954 bit_off, bits_found); 1022 bit_off, bits_found);
955 1023
956 return ret; 1024 return ret;
957} 1025}
958 1026
1027static int ocfs2_alloc_dinode_update_counts(struct inode *inode,
1028 struct ocfs2_journal_handle *handle,
1029 struct buffer_head *di_bh,
1030 u32 num_bits,
1031 u16 chain)
1032{
1033 int ret;
1034 u32 tmp_used;
1035 struct ocfs2_dinode *di = (struct ocfs2_dinode *) di_bh->b_data;
1036 struct ocfs2_chain_list *cl = (struct ocfs2_chain_list *) &di->id2.i_chain;
1037
1038 ret = ocfs2_journal_access(handle, inode, di_bh,
1039 OCFS2_JOURNAL_ACCESS_WRITE);
1040 if (ret < 0) {
1041 mlog_errno(ret);
1042 goto out;
1043 }
1044
1045 tmp_used = le32_to_cpu(di->id1.bitmap1.i_used);
1046 di->id1.bitmap1.i_used = cpu_to_le32(num_bits + tmp_used);
1047 le32_add_cpu(&cl->cl_recs[chain].c_free, -num_bits);
1048
1049 ret = ocfs2_journal_dirty(handle, di_bh);
1050 if (ret < 0)
1051 mlog_errno(ret);
1052
1053out:
1054 return ret;
1055}
1056
1057static int ocfs2_search_one_group(struct ocfs2_alloc_context *ac,
1058 u32 bits_wanted,
1059 u32 min_bits,
1060 u16 *bit_off,
1061 unsigned int *num_bits,
1062 u64 gd_blkno,
1063 u16 *bits_left)
1064{
1065 int ret;
1066 u16 found;
1067 struct buffer_head *group_bh = NULL;
1068 struct ocfs2_group_desc *gd;
1069 struct inode *alloc_inode = ac->ac_inode;
1070 struct ocfs2_journal_handle *handle = ac->ac_handle;
1071
1072 ret = ocfs2_read_block(OCFS2_SB(alloc_inode->i_sb), gd_blkno,
1073 &group_bh, OCFS2_BH_CACHED, alloc_inode);
1074 if (ret < 0) {
1075 mlog_errno(ret);
1076 return ret;
1077 }
1078
1079 gd = (struct ocfs2_group_desc *) group_bh->b_data;
1080 if (!OCFS2_IS_VALID_GROUP_DESC(gd)) {
1081 OCFS2_RO_ON_INVALID_GROUP_DESC(alloc_inode->i_sb, gd);
1082 ret = -EIO;
1083 goto out;
1084 }
1085
1086 ret = ac->ac_group_search(alloc_inode, group_bh, bits_wanted, min_bits,
1087 bit_off, &found);
1088 if (ret < 0) {
1089 if (ret != -ENOSPC)
1090 mlog_errno(ret);
1091 goto out;
1092 }
1093
1094 *num_bits = found;
1095
1096 ret = ocfs2_alloc_dinode_update_counts(alloc_inode, handle, ac->ac_bh,
1097 *num_bits,
1098 le16_to_cpu(gd->bg_chain));
1099 if (ret < 0) {
1100 mlog_errno(ret);
1101 goto out;
1102 }
1103
1104 ret = ocfs2_block_group_set_bits(handle, alloc_inode, gd, group_bh,
1105 *bit_off, *num_bits);
1106 if (ret < 0)
1107 mlog_errno(ret);
1108
1109 *bits_left = le16_to_cpu(gd->bg_free_bits_count);
1110
1111out:
1112 brelse(group_bh);
1113
1114 return ret;
1115}
1116
959static int ocfs2_search_chain(struct ocfs2_alloc_context *ac, 1117static int ocfs2_search_chain(struct ocfs2_alloc_context *ac,
960 u32 bits_wanted, 1118 u32 bits_wanted,
961 u32 min_bits, 1119 u32 min_bits,
962 u16 *bit_off, 1120 u16 *bit_off,
963 unsigned int *num_bits, 1121 unsigned int *num_bits,
964 u64 *bg_blkno) 1122 u64 *bg_blkno,
1123 u16 *bits_left)
965{ 1124{
966 int status; 1125 int status;
967 u16 chain, tmp_bits; 1126 u16 chain, tmp_bits;
@@ -988,9 +1147,9 @@ static int ocfs2_search_chain(struct ocfs2_alloc_context *ac,
988 goto bail; 1147 goto bail;
989 } 1148 }
990 bg = (struct ocfs2_group_desc *) group_bh->b_data; 1149 bg = (struct ocfs2_group_desc *) group_bh->b_data;
991 if (!OCFS2_IS_VALID_GROUP_DESC(bg)) { 1150 status = ocfs2_check_group_descriptor(alloc_inode->i_sb, fe, bg);
992 OCFS2_RO_ON_INVALID_GROUP_DESC(alloc_inode->i_sb, bg); 1151 if (status) {
993 status = -EIO; 1152 mlog_errno(status);
994 goto bail; 1153 goto bail;
995 } 1154 }
996 1155
@@ -1018,9 +1177,9 @@ static int ocfs2_search_chain(struct ocfs2_alloc_context *ac,
1018 goto bail; 1177 goto bail;
1019 } 1178 }
1020 bg = (struct ocfs2_group_desc *) group_bh->b_data; 1179 bg = (struct ocfs2_group_desc *) group_bh->b_data;
1021 if (!OCFS2_IS_VALID_GROUP_DESC(bg)) { 1180 status = ocfs2_check_group_descriptor(alloc_inode->i_sb, fe, bg);
1022 OCFS2_RO_ON_INVALID_GROUP_DESC(alloc_inode->i_sb, bg); 1181 if (status) {
1023 status = -EIO; 1182 mlog_errno(status);
1024 goto bail; 1183 goto bail;
1025 } 1184 }
1026 } 1185 }
@@ -1099,6 +1258,7 @@ static int ocfs2_search_chain(struct ocfs2_alloc_context *ac,
1099 (unsigned long long)fe->i_blkno); 1258 (unsigned long long)fe->i_blkno);
1100 1259
1101 *bg_blkno = le64_to_cpu(bg->bg_blkno); 1260 *bg_blkno = le64_to_cpu(bg->bg_blkno);
1261 *bits_left = le16_to_cpu(bg->bg_free_bits_count);
1102bail: 1262bail:
1103 if (group_bh) 1263 if (group_bh)
1104 brelse(group_bh); 1264 brelse(group_bh);
@@ -1120,6 +1280,8 @@ static int ocfs2_claim_suballoc_bits(struct ocfs2_super *osb,
1120{ 1280{
1121 int status; 1281 int status;
1122 u16 victim, i; 1282 u16 victim, i;
1283 u16 bits_left = 0;
1284 u64 hint_blkno = ac->ac_last_group;
1123 struct ocfs2_chain_list *cl; 1285 struct ocfs2_chain_list *cl;
1124 struct ocfs2_dinode *fe; 1286 struct ocfs2_dinode *fe;
1125 1287
@@ -1146,6 +1308,28 @@ static int ocfs2_claim_suballoc_bits(struct ocfs2_super *osb,
1146 goto bail; 1308 goto bail;
1147 } 1309 }
1148 1310
1311 if (hint_blkno) {
1312 /* Attempt to short-circuit the usual search mechanism
1313 * by jumping straight to the most recently used
 1314 * by jumping straight to the most recently used
1315 * contiguousness across allocations. */
1316 status = ocfs2_search_one_group(ac, bits_wanted, min_bits,
1317 bit_off, num_bits,
1318 hint_blkno, &bits_left);
1319 if (!status) {
1320 /* Be careful to update *bg_blkno here as the
1321 * caller is expecting it to be filled in, and
1322 * ocfs2_search_one_group() won't do that for
1323 * us. */
1324 *bg_blkno = hint_blkno;
1325 goto set_hint;
1326 }
1327 if (status < 0 && status != -ENOSPC) {
1328 mlog_errno(status);
1329 goto bail;
1330 }
1331 }
1332
1149 cl = (struct ocfs2_chain_list *) &fe->id2.i_chain; 1333 cl = (struct ocfs2_chain_list *) &fe->id2.i_chain;
1150 1334
1151 victim = ocfs2_find_victim_chain(cl); 1335 victim = ocfs2_find_victim_chain(cl);
@@ -1153,9 +1337,9 @@ static int ocfs2_claim_suballoc_bits(struct ocfs2_super *osb,
1153 ac->ac_allow_chain_relink = 1; 1337 ac->ac_allow_chain_relink = 1;
1154 1338
1155 status = ocfs2_search_chain(ac, bits_wanted, min_bits, bit_off, 1339 status = ocfs2_search_chain(ac, bits_wanted, min_bits, bit_off,
1156 num_bits, bg_blkno); 1340 num_bits, bg_blkno, &bits_left);
1157 if (!status) 1341 if (!status)
1158 goto bail; 1342 goto set_hint;
1159 if (status < 0 && status != -ENOSPC) { 1343 if (status < 0 && status != -ENOSPC) {
1160 mlog_errno(status); 1344 mlog_errno(status);
1161 goto bail; 1345 goto bail;
@@ -1177,8 +1361,8 @@ static int ocfs2_claim_suballoc_bits(struct ocfs2_super *osb,
1177 1361
1178 ac->ac_chain = i; 1362 ac->ac_chain = i;
1179 status = ocfs2_search_chain(ac, bits_wanted, min_bits, 1363 status = ocfs2_search_chain(ac, bits_wanted, min_bits,
1180 bit_off, num_bits, 1364 bit_off, num_bits, bg_blkno,
1181 bg_blkno); 1365 &bits_left);
1182 if (!status) 1366 if (!status)
1183 break; 1367 break;
1184 if (status < 0 && status != -ENOSPC) { 1368 if (status < 0 && status != -ENOSPC) {
@@ -1186,8 +1370,19 @@ static int ocfs2_claim_suballoc_bits(struct ocfs2_super *osb,
1186 goto bail; 1370 goto bail;
1187 } 1371 }
1188 } 1372 }
1189bail:
1190 1373
1374set_hint:
1375 if (status != -ENOSPC) {
1376 /* If the next search of this group is not likely to
1377 * yield a suitable extent, then we reset the last
1378 * group hint so as to not waste a disk read */
1379 if (bits_left < min_bits)
1380 ac->ac_last_group = 0;
1381 else
1382 ac->ac_last_group = *bg_blkno;
1383 }
1384
1385bail:
1191 mlog_exit(status); 1386 mlog_exit(status);
1192 return status; 1387 return status;
1193} 1388}
@@ -1341,7 +1536,7 @@ int ocfs2_claim_clusters(struct ocfs2_super *osb,
1341{ 1536{
1342 int status; 1537 int status;
1343 unsigned int bits_wanted = ac->ac_bits_wanted - ac->ac_bits_given; 1538 unsigned int bits_wanted = ac->ac_bits_wanted - ac->ac_bits_given;
1344 u64 bg_blkno; 1539 u64 bg_blkno = 0;
1345 u16 bg_bit_off; 1540 u16 bg_bit_off;
1346 1541
1347 mlog_entry_void(); 1542 mlog_entry_void();
@@ -1494,9 +1689,9 @@ static int ocfs2_free_suballoc_bits(struct ocfs2_journal_handle *handle,
1494 } 1689 }
1495 1690
1496 group = (struct ocfs2_group_desc *) group_bh->b_data; 1691 group = (struct ocfs2_group_desc *) group_bh->b_data;
1497 if (!OCFS2_IS_VALID_GROUP_DESC(group)) { 1692 status = ocfs2_check_group_descriptor(alloc_inode->i_sb, fe, group);
1498 OCFS2_RO_ON_INVALID_GROUP_DESC(alloc_inode->i_sb, group); 1693 if (status) {
1499 status = -EIO; 1694 mlog_errno(status);
1500 goto bail; 1695 goto bail;
1501 } 1696 }
1502 BUG_ON((count + start_bit) > le16_to_cpu(group->bg_bits)); 1697 BUG_ON((count + start_bit) > le16_to_cpu(group->bg_bits));
diff --git a/fs/ocfs2/suballoc.h b/fs/ocfs2/suballoc.h
index a76c82a7cea..c787838d105 100644
--- a/fs/ocfs2/suballoc.h
+++ b/fs/ocfs2/suballoc.h
@@ -49,6 +49,8 @@ struct ocfs2_alloc_context {
49 u16 ac_chain; 49 u16 ac_chain;
50 int ac_allow_chain_relink; 50 int ac_allow_chain_relink;
51 group_search_t *ac_group_search; 51 group_search_t *ac_group_search;
52
53 u64 ac_last_group;
52}; 54};
53 55
54void ocfs2_free_alloc_context(struct ocfs2_alloc_context *ac); 56void ocfs2_free_alloc_context(struct ocfs2_alloc_context *ac);
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index 382706a67ff..d17e33e66a1 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -1442,8 +1442,13 @@ static int ocfs2_initialize_super(struct super_block *sb,
1442 1442
1443 osb->bitmap_blkno = OCFS2_I(inode)->ip_blkno; 1443 osb->bitmap_blkno = OCFS2_I(inode)->ip_blkno;
1444 1444
1445 /* We don't have a cluster lock on the bitmap here because
1446 * we're only interested in static information and the extra
 1447 * complexity at mount time isn't worth it. Don't pass the
1448 * inode in to the read function though as we don't want it to
1449 * be put in the cache. */
1445 status = ocfs2_read_block(osb, osb->bitmap_blkno, &bitmap_bh, 0, 1450 status = ocfs2_read_block(osb, osb->bitmap_blkno, &bitmap_bh, 0,
1446 inode); 1451 NULL);
1447 iput(inode); 1452 iput(inode);
1448 if (status < 0) { 1453 if (status < 0) {
1449 mlog_errno(status); 1454 mlog_errno(status);
@@ -1452,7 +1457,6 @@ static int ocfs2_initialize_super(struct super_block *sb,
1452 1457
1453 di = (struct ocfs2_dinode *) bitmap_bh->b_data; 1458 di = (struct ocfs2_dinode *) bitmap_bh->b_data;
1454 osb->bitmap_cpg = le16_to_cpu(di->id2.i_chain.cl_cpg); 1459 osb->bitmap_cpg = le16_to_cpu(di->id2.i_chain.cl_cpg);
1455 osb->num_clusters = le32_to_cpu(di->id1.bitmap1.i_total);
1456 brelse(bitmap_bh); 1460 brelse(bitmap_bh);
1457 mlog(0, "cluster bitmap inode: %llu, clusters per group: %u\n", 1461 mlog(0, "cluster bitmap inode: %llu, clusters per group: %u\n",
1458 (unsigned long long)osb->bitmap_blkno, osb->bitmap_cpg); 1462 (unsigned long long)osb->bitmap_blkno, osb->bitmap_cpg);
diff --git a/fs/partitions/sun.c b/fs/partitions/sun.c
index abe91ca03ed..0a5927c806c 100644
--- a/fs/partitions/sun.c
+++ b/fs/partitions/sun.c
@@ -74,7 +74,7 @@ int sun_partition(struct parsed_partitions *state, struct block_device *bdev)
74 spc = be16_to_cpu(label->ntrks) * be16_to_cpu(label->nsect); 74 spc = be16_to_cpu(label->ntrks) * be16_to_cpu(label->nsect);
75 for (i = 0; i < 8; i++, p++) { 75 for (i = 0; i < 8; i++, p++) {
76 unsigned long st_sector; 76 unsigned long st_sector;
77 int num_sectors; 77 unsigned int num_sectors;
78 78
79 st_sector = be32_to_cpu(p->start_cylinder) * spc; 79 st_sector = be32_to_cpu(p->start_cylinder) * spc;
80 num_sectors = be32_to_cpu(p->num_sectors); 80 num_sectors = be32_to_cpu(p->num_sectors);
diff --git a/fs/proc/proc_misc.c b/fs/proc/proc_misc.c
index 9f2cfc30f9c..94215622544 100644
--- a/fs/proc/proc_misc.c
+++ b/fs/proc/proc_misc.c
@@ -169,7 +169,7 @@ static int meminfo_read_proc(char *page, char **start, off_t off,
169 "Mapped: %8lu kB\n" 169 "Mapped: %8lu kB\n"
170 "Slab: %8lu kB\n" 170 "Slab: %8lu kB\n"
171 "PageTables: %8lu kB\n" 171 "PageTables: %8lu kB\n"
172 "NFS Unstable: %8lu kB\n" 172 "NFS_Unstable: %8lu kB\n"
173 "Bounce: %8lu kB\n" 173 "Bounce: %8lu kB\n"
174 "CommitLimit: %8lu kB\n" 174 "CommitLimit: %8lu kB\n"
175 "Committed_AS: %8lu kB\n" 175 "Committed_AS: %8lu kB\n"
diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c
index 39fedaa88a0..d935fb9394e 100644
--- a/fs/reiserfs/xattr.c
+++ b/fs/reiserfs/xattr.c
@@ -424,7 +424,7 @@ int xattr_readdir(struct file *file, filldir_t filler, void *buf)
424 int res = -ENOTDIR; 424 int res = -ENOTDIR;
425 if (!file->f_op || !file->f_op->readdir) 425 if (!file->f_op || !file->f_op->readdir)
426 goto out; 426 goto out;
427 mutex_lock(&inode->i_mutex); 427 mutex_lock_nested(&inode->i_mutex, I_MUTEX_XATTR);
428// down(&inode->i_zombie); 428// down(&inode->i_zombie);
429 res = -ENOENT; 429 res = -ENOENT;
430 if (!IS_DEADDIR(inode)) { 430 if (!IS_DEADDIR(inode)) {
diff --git a/fs/udf/super.c b/fs/udf/super.c
index 4df822c881b..fcce1a21a51 100644
--- a/fs/udf/super.c
+++ b/fs/udf/super.c
@@ -115,6 +115,13 @@ static struct inode *udf_alloc_inode(struct super_block *sb)
115 ei = (struct udf_inode_info *)kmem_cache_alloc(udf_inode_cachep, SLAB_KERNEL); 115 ei = (struct udf_inode_info *)kmem_cache_alloc(udf_inode_cachep, SLAB_KERNEL);
116 if (!ei) 116 if (!ei)
117 return NULL; 117 return NULL;
118
119 ei->i_unique = 0;
120 ei->i_lenExtents = 0;
121 ei->i_next_alloc_block = 0;
122 ei->i_next_alloc_goal = 0;
123 ei->i_strat4096 = 0;
124
118 return &ei->vfs_inode; 125 return &ei->vfs_inode;
119} 126}
120 127
@@ -1652,7 +1659,7 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
1652 iput(inode); 1659 iput(inode);
1653 goto error_out; 1660 goto error_out;
1654 } 1661 }
1655 sb->s_maxbytes = MAX_LFS_FILESIZE; 1662 sb->s_maxbytes = 1<<30;
1656 return 0; 1663 return 0;
1657 1664
1658error_out: 1665error_out:
diff --git a/fs/udf/truncate.c b/fs/udf/truncate.c
index e1b0e8cfecb..0abd66ce36e 100644
--- a/fs/udf/truncate.c
+++ b/fs/udf/truncate.c
@@ -239,37 +239,51 @@ void udf_truncate_extents(struct inode * inode)
239 { 239 {
240 if (offset) 240 if (offset)
241 { 241 {
242 extoffset -= adsize; 242 /*
 243 etype = udf_next_aext(inode, &bloc, &extoffset, &eloc, &elen, &bh, 1); 243 * OK, there is no extent covering inode->i_size and
244 if (etype == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) 244 * no extent above inode->i_size => truncate is
245 { 245 * extending the file by 'offset'.
246 extoffset -= adsize; 246 */
247 elen = EXT_NOT_RECORDED_NOT_ALLOCATED | (elen + offset); 247 if ((!bh && extoffset == udf_file_entry_alloc_offset(inode)) ||
248 udf_write_aext(inode, bloc, &extoffset, eloc, elen, bh, 0); 248 (bh && extoffset == sizeof(struct allocExtDesc))) {
249 /* File has no extents at all! */
250 memset(&eloc, 0x00, sizeof(kernel_lb_addr));
251 elen = EXT_NOT_RECORDED_NOT_ALLOCATED | offset;
252 udf_add_aext(inode, &bloc, &extoffset, eloc, elen, &bh, 1);
249 } 253 }
250 else if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30)) 254 else {
251 {
252 kernel_lb_addr neloc = { 0, 0 };
253 extoffset -= adsize; 255 extoffset -= adsize;
254 nelen = EXT_NOT_RECORDED_NOT_ALLOCATED | 256 etype = udf_next_aext(inode, &bloc, &extoffset, &eloc, &elen, &bh, 1);
255 ((elen + offset + inode->i_sb->s_blocksize - 1) & 257 if (etype == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
256 ~(inode->i_sb->s_blocksize - 1)); 258 {
257 udf_write_aext(inode, bloc, &extoffset, neloc, nelen, bh, 1); 259 extoffset -= adsize;
258 udf_add_aext(inode, &bloc, &extoffset, eloc, (etype << 30) | elen, &bh, 1); 260 elen = EXT_NOT_RECORDED_NOT_ALLOCATED | (elen + offset);
259 } 261 udf_write_aext(inode, bloc, &extoffset, eloc, elen, bh, 0);
260 else 262 }
261 { 263 else if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30))
262 if (elen & (inode->i_sb->s_blocksize - 1))
263 { 264 {
265 kernel_lb_addr neloc = { 0, 0 };
264 extoffset -= adsize; 266 extoffset -= adsize;
265 elen = EXT_RECORDED_ALLOCATED | 267 nelen = EXT_NOT_RECORDED_NOT_ALLOCATED |
266 ((elen + inode->i_sb->s_blocksize - 1) & 268 ((elen + offset + inode->i_sb->s_blocksize - 1) &
267 ~(inode->i_sb->s_blocksize - 1)); 269 ~(inode->i_sb->s_blocksize - 1));
268 udf_write_aext(inode, bloc, &extoffset, eloc, elen, bh, 1); 270 udf_write_aext(inode, bloc, &extoffset, neloc, nelen, bh, 1);
271 udf_add_aext(inode, &bloc, &extoffset, eloc, (etype << 30) | elen, &bh, 1);
272 }
273 else
274 {
275 if (elen & (inode->i_sb->s_blocksize - 1))
276 {
277 extoffset -= adsize;
278 elen = EXT_RECORDED_ALLOCATED |
279 ((elen + inode->i_sb->s_blocksize - 1) &
280 ~(inode->i_sb->s_blocksize - 1));
281 udf_write_aext(inode, bloc, &extoffset, eloc, elen, bh, 1);
282 }
283 memset(&eloc, 0x00, sizeof(kernel_lb_addr));
284 elen = EXT_NOT_RECORDED_NOT_ALLOCATED | offset;
285 udf_add_aext(inode, &bloc, &extoffset, eloc, elen, &bh, 1);
269 } 286 }
270 memset(&eloc, 0x00, sizeof(kernel_lb_addr));
271 elen = EXT_NOT_RECORDED_NOT_ALLOCATED | offset;
272 udf_add_aext(inode, &bloc, &extoffset, eloc, elen, &bh, 1);
273 } 287 }
274 } 288 }
275 } 289 }
diff --git a/fs/ufs/inode.c b/fs/ufs/inode.c
index e7c8615beb6..30c6e8a9446 100644
--- a/fs/ufs/inode.c
+++ b/fs/ufs/inode.c
@@ -169,18 +169,20 @@ static void ufs_clear_frag(struct inode *inode, struct buffer_head *bh)
169 169
170static struct buffer_head * 170static struct buffer_head *
171ufs_clear_frags(struct inode *inode, sector_t beg, 171ufs_clear_frags(struct inode *inode, sector_t beg,
172 unsigned int n) 172 unsigned int n, sector_t want)
173{ 173{
174 struct buffer_head *res, *bh; 174 struct buffer_head *res = NULL, *bh;
175 sector_t end = beg + n; 175 sector_t end = beg + n;
176 176
177 res = sb_getblk(inode->i_sb, beg); 177 for (; beg < end; ++beg) {
178 ufs_clear_frag(inode, res);
179 for (++beg; beg < end; ++beg) {
180 bh = sb_getblk(inode->i_sb, beg); 178 bh = sb_getblk(inode->i_sb, beg);
181 ufs_clear_frag(inode, bh); 179 ufs_clear_frag(inode, bh);
182 brelse(bh); 180 if (want != beg)
181 brelse(bh);
182 else
183 res = bh;
183 } 184 }
185 BUG_ON(!res);
184 return res; 186 return res;
185} 187}
186 188
@@ -265,7 +267,9 @@ repeat:
265 lastfrag = ufsi->i_lastfrag; 267 lastfrag = ufsi->i_lastfrag;
266 268
267 } 269 }
268 goal = fs32_to_cpu(sb, ufsi->i_u1.i_data[lastblock]) + uspi->s_fpb; 270 tmp = fs32_to_cpu(sb, ufsi->i_u1.i_data[lastblock]);
271 if (tmp)
272 goal = tmp + uspi->s_fpb;
269 tmp = ufs_new_fragments (inode, p, fragment - blockoff, 273 tmp = ufs_new_fragments (inode, p, fragment - blockoff,
270 goal, required + blockoff, 274 goal, required + blockoff,
271 err, locked_page); 275 err, locked_page);
@@ -277,13 +281,15 @@ repeat:
277 tmp = ufs_new_fragments(inode, p, fragment - (blockoff - lastblockoff), 281 tmp = ufs_new_fragments(inode, p, fragment - (blockoff - lastblockoff),
278 fs32_to_cpu(sb, *p), required + (blockoff - lastblockoff), 282 fs32_to_cpu(sb, *p), required + (blockoff - lastblockoff),
279 err, locked_page); 283 err, locked_page);
280 } 284 } else /* (lastblock > block) */ {
281 /* 285 /*
282 * We will allocate new block before last allocated block 286 * We will allocate new block before last allocated block
283 */ 287 */
284 else /* (lastblock > block) */ { 288 if (block) {
285 if (lastblock && (tmp = fs32_to_cpu(sb, ufsi->i_u1.i_data[lastblock-1]))) 289 tmp = fs32_to_cpu(sb, ufsi->i_u1.i_data[block-1]);
286 goal = tmp + uspi->s_fpb; 290 if (tmp)
291 goal = tmp + uspi->s_fpb;
292 }
287 tmp = ufs_new_fragments(inode, p, fragment - blockoff, 293 tmp = ufs_new_fragments(inode, p, fragment - blockoff,
288 goal, uspi->s_fpb, err, locked_page); 294 goal, uspi->s_fpb, err, locked_page);
289 } 295 }
@@ -296,7 +302,7 @@ repeat:
296 } 302 }
297 303
298 if (!phys) { 304 if (!phys) {
299 result = ufs_clear_frags(inode, tmp + blockoff, required); 305 result = ufs_clear_frags(inode, tmp, required, tmp + blockoff);
300 } else { 306 } else {
301 *phys = tmp + blockoff; 307 *phys = tmp + blockoff;
302 result = NULL; 308 result = NULL;
@@ -383,7 +389,7 @@ repeat:
383 } 389 }
384 } 390 }
385 391
386 if (block && (tmp = fs32_to_cpu(sb, ((__fs32*)bh->b_data)[block-1]) + uspi->s_fpb)) 392 if (block && (tmp = fs32_to_cpu(sb, ((__fs32*)bh->b_data)[block-1])))
387 goal = tmp + uspi->s_fpb; 393 goal = tmp + uspi->s_fpb;
388 else 394 else
389 goal = bh->b_blocknr + uspi->s_fpb; 395 goal = bh->b_blocknr + uspi->s_fpb;
@@ -397,7 +403,8 @@ repeat:
397 403
398 404
399 if (!phys) { 405 if (!phys) {
400 result = ufs_clear_frags(inode, tmp + blockoff, uspi->s_fpb); 406 result = ufs_clear_frags(inode, tmp, uspi->s_fpb,
407 tmp + blockoff);
401 } else { 408 } else {
402 *phys = tmp + blockoff; 409 *phys = tmp + blockoff;
403 *new = 1; 410 *new = 1;
diff --git a/fs/ufs/truncate.c b/fs/ufs/truncate.c
index c9b55872079..ea11d04c41a 100644
--- a/fs/ufs/truncate.c
+++ b/fs/ufs/truncate.c
@@ -375,17 +375,15 @@ static int ufs_alloc_lastblock(struct inode *inode)
375 int err = 0; 375 int err = 0;
376 struct address_space *mapping = inode->i_mapping; 376 struct address_space *mapping = inode->i_mapping;
377 struct ufs_sb_private_info *uspi = UFS_SB(inode->i_sb)->s_uspi; 377 struct ufs_sb_private_info *uspi = UFS_SB(inode->i_sb)->s_uspi;
378 struct ufs_inode_info *ufsi = UFS_I(inode);
379 unsigned lastfrag, i, end; 378 unsigned lastfrag, i, end;
380 struct page *lastpage; 379 struct page *lastpage;
381 struct buffer_head *bh; 380 struct buffer_head *bh;
382 381
383 lastfrag = (i_size_read(inode) + uspi->s_fsize - 1) >> uspi->s_fshift; 382 lastfrag = (i_size_read(inode) + uspi->s_fsize - 1) >> uspi->s_fshift;
384 383
385 if (!lastfrag) { 384 if (!lastfrag)
386 ufsi->i_lastfrag = 0;
387 goto out; 385 goto out;
388 } 386
389 lastfrag--; 387 lastfrag--;
390 388
391 lastpage = ufs_get_locked_page(mapping, lastfrag >> 389 lastpage = ufs_get_locked_page(mapping, lastfrag >>
@@ -400,25 +398,25 @@ static int ufs_alloc_lastblock(struct inode *inode)
400 for (i = 0; i < end; ++i) 398 for (i = 0; i < end; ++i)
401 bh = bh->b_this_page; 399 bh = bh->b_this_page;
402 400
403 if (!buffer_mapped(bh)) { 401
404 err = ufs_getfrag_block(inode, lastfrag, bh, 1); 402 err = ufs_getfrag_block(inode, lastfrag, bh, 1);
405 403
406 if (unlikely(err)) 404 if (unlikely(err))
407 goto out_unlock; 405 goto out_unlock;
408 406
409 if (buffer_new(bh)) { 407 if (buffer_new(bh)) {
410 clear_buffer_new(bh); 408 clear_buffer_new(bh);
411 unmap_underlying_metadata(bh->b_bdev, 409 unmap_underlying_metadata(bh->b_bdev,
412 bh->b_blocknr); 410 bh->b_blocknr);
413 /* 411 /*
 414 * we do not zeroize the fragment, because if 412 * we do not zeroize the fragment, because if
 415 * it is mapped to a hole, it already contains zeroes 413 * it is mapped to a hole, it already contains zeroes
416 */ 414 */
417 set_buffer_uptodate(bh); 415 set_buffer_uptodate(bh);
418 mark_buffer_dirty(bh); 416 mark_buffer_dirty(bh);
419 set_page_dirty(lastpage); 417 set_page_dirty(lastpage);
420 }
421 } 418 }
419
422out_unlock: 420out_unlock:
423 ufs_put_locked_page(lastpage); 421 ufs_put_locked_page(lastpage);
424out: 422out:
@@ -440,23 +438,11 @@ int ufs_truncate(struct inode *inode, loff_t old_i_size)
440 if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) 438 if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
441 return -EPERM; 439 return -EPERM;
442 440
443 if (inode->i_size > old_i_size) { 441 err = ufs_alloc_lastblock(inode);
444 /*
445 * if we expand file we should care about
446 * allocation of block for last byte first of all
447 */
448 err = ufs_alloc_lastblock(inode);
449 442
450 if (err) { 443 if (err) {
451 i_size_write(inode, old_i_size); 444 i_size_write(inode, old_i_size);
452 goto out; 445 goto out;
453 }
454 /*
455 * go away, because of we expand file, and we do not
456 * need free blocks, and zeroizes page
457 */
458 lock_kernel();
459 goto almost_end;
460 } 446 }
461 447
462 block_truncate_page(inode->i_mapping, inode->i_size, ufs_getfrag_block); 448 block_truncate_page(inode->i_mapping, inode->i_size, ufs_getfrag_block);
@@ -477,21 +463,8 @@ int ufs_truncate(struct inode *inode, loff_t old_i_size)
477 yield(); 463 yield();
478 } 464 }
479 465
480 if (inode->i_size < old_i_size) {
481 /*
482 * now we should have enough space
483 * to allocate block for last byte
484 */
485 err = ufs_alloc_lastblock(inode);
486 if (err)
487 /*
488 * looks like all the same - we have no space,
489 * but we truncate file already
490 */
491 inode->i_size = (ufsi->i_lastfrag - 1) * uspi->s_fsize;
492 }
493almost_end:
494 inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC; 466 inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC;
467 ufsi->i_lastfrag = DIRECT_FRAGMENT;
495 unlock_kernel(); 468 unlock_kernel();
496 mark_inode_dirty(inode); 469 mark_inode_dirty(inode);
497out: 470out:
diff --git a/fs/xfs/xfs_alloc.c b/fs/xfs/xfs_alloc.c
index eef6763f3a6..d2bbcd882a6 100644
--- a/fs/xfs/xfs_alloc.c
+++ b/fs/xfs/xfs_alloc.c
@@ -1835,40 +1835,47 @@ xfs_alloc_fix_freelist(
1835 &agbp))) 1835 &agbp)))
1836 return error; 1836 return error;
1837 if (!pag->pagf_init) { 1837 if (!pag->pagf_init) {
1838 ASSERT(flags & XFS_ALLOC_FLAG_TRYLOCK);
1839 ASSERT(!(flags & XFS_ALLOC_FLAG_FREEING));
1838 args->agbp = NULL; 1840 args->agbp = NULL;
1839 return 0; 1841 return 0;
1840 } 1842 }
1841 } else 1843 } else
1842 agbp = NULL; 1844 agbp = NULL;
1843 1845
1844 /* If this is a metadata preferred pag and we are user data 1846 /*
1847 * If this is a metadata preferred pag and we are user data
1845 * then try somewhere else if we are not being asked to 1848 * then try somewhere else if we are not being asked to
1846 * try harder at this point 1849 * try harder at this point
1847 */ 1850 */
1848 if (pag->pagf_metadata && args->userdata && flags) { 1851 if (pag->pagf_metadata && args->userdata &&
1852 (flags & XFS_ALLOC_FLAG_TRYLOCK)) {
1853 ASSERT(!(flags & XFS_ALLOC_FLAG_FREEING));
1849 args->agbp = NULL; 1854 args->agbp = NULL;
1850 return 0; 1855 return 0;
1851 } 1856 }
1852 1857
1853 need = XFS_MIN_FREELIST_PAG(pag, mp); 1858 if (!(flags & XFS_ALLOC_FLAG_FREEING)) {
1854 delta = need > pag->pagf_flcount ? need - pag->pagf_flcount : 0; 1859 need = XFS_MIN_FREELIST_PAG(pag, mp);
1855 /* 1860 delta = need > pag->pagf_flcount ? need - pag->pagf_flcount : 0;
1856 * If it looks like there isn't a long enough extent, or enough 1861 /*
1857 * total blocks, reject it. 1862 * If it looks like there isn't a long enough extent, or enough
1858 */ 1863 * total blocks, reject it.
1859 longest = (pag->pagf_longest > delta) ? 1864 */
1860 (pag->pagf_longest - delta) : 1865 longest = (pag->pagf_longest > delta) ?
1861 (pag->pagf_flcount > 0 || pag->pagf_longest > 0); 1866 (pag->pagf_longest - delta) :
1862 if (args->minlen + args->alignment + args->minalignslop - 1 > longest || 1867 (pag->pagf_flcount > 0 || pag->pagf_longest > 0);
1863 (!(flags & XFS_ALLOC_FLAG_FREEING) && 1868 if ((args->minlen + args->alignment + args->minalignslop - 1) >
1864 (int)(pag->pagf_freeblks + pag->pagf_flcount - 1869 longest ||
1865 need - args->total) < 1870 ((int)(pag->pagf_freeblks + pag->pagf_flcount -
1866 (int)args->minleft)) { 1871 need - args->total) < (int)args->minleft)) {
1867 if (agbp) 1872 if (agbp)
1868 xfs_trans_brelse(tp, agbp); 1873 xfs_trans_brelse(tp, agbp);
1869 args->agbp = NULL; 1874 args->agbp = NULL;
1870 return 0; 1875 return 0;
1876 }
1871 } 1877 }
1878
1872 /* 1879 /*
1873 * Get the a.g. freespace buffer. 1880 * Get the a.g. freespace buffer.
1874 * Can fail if we're not blocking on locks, and it's held. 1881 * Can fail if we're not blocking on locks, and it's held.
@@ -1878,6 +1885,8 @@ xfs_alloc_fix_freelist(
1878 &agbp))) 1885 &agbp)))
1879 return error; 1886 return error;
1880 if (agbp == NULL) { 1887 if (agbp == NULL) {
1888 ASSERT(flags & XFS_ALLOC_FLAG_TRYLOCK);
1889 ASSERT(!(flags & XFS_ALLOC_FLAG_FREEING));
1881 args->agbp = NULL; 1890 args->agbp = NULL;
1882 return 0; 1891 return 0;
1883 } 1892 }
@@ -1887,22 +1896,24 @@ xfs_alloc_fix_freelist(
1887 */ 1896 */
1888 agf = XFS_BUF_TO_AGF(agbp); 1897 agf = XFS_BUF_TO_AGF(agbp);
1889 need = XFS_MIN_FREELIST(agf, mp); 1898 need = XFS_MIN_FREELIST(agf, mp);
1890 delta = need > be32_to_cpu(agf->agf_flcount) ?
1891 (need - be32_to_cpu(agf->agf_flcount)) : 0;
1892 /* 1899 /*
1893 * If there isn't enough total or single-extent, reject it. 1900 * If there isn't enough total or single-extent, reject it.
1894 */ 1901 */
1895 longest = be32_to_cpu(agf->agf_longest); 1902 if (!(flags & XFS_ALLOC_FLAG_FREEING)) {
1896 longest = (longest > delta) ? (longest - delta) : 1903 delta = need > be32_to_cpu(agf->agf_flcount) ?
1897 (be32_to_cpu(agf->agf_flcount) > 0 || longest > 0); 1904 (need - be32_to_cpu(agf->agf_flcount)) : 0;
1898 if (args->minlen + args->alignment + args->minalignslop - 1 > longest || 1905 longest = be32_to_cpu(agf->agf_longest);
1899 (!(flags & XFS_ALLOC_FLAG_FREEING) && 1906 longest = (longest > delta) ? (longest - delta) :
1900 (int)(be32_to_cpu(agf->agf_freeblks) + 1907 (be32_to_cpu(agf->agf_flcount) > 0 || longest > 0);
1901 be32_to_cpu(agf->agf_flcount) - need - args->total) < 1908 if ((args->minlen + args->alignment + args->minalignslop - 1) >
1902 (int)args->minleft)) { 1909 longest ||
1903 xfs_trans_brelse(tp, agbp); 1910 ((int)(be32_to_cpu(agf->agf_freeblks) +
1904 args->agbp = NULL; 1911 be32_to_cpu(agf->agf_flcount) - need - args->total) <
1905 return 0; 1912 (int)args->minleft)) {
1913 xfs_trans_brelse(tp, agbp);
1914 args->agbp = NULL;
1915 return 0;
1916 }
1906 } 1917 }
1907 /* 1918 /*
1908 * Make the freelist shorter if it's too long. 1919 * Make the freelist shorter if it's too long.
@@ -1950,12 +1961,11 @@ xfs_alloc_fix_freelist(
1950 * on a completely full ag. 1961 * on a completely full ag.
1951 */ 1962 */
1952 if (targs.agbno == NULLAGBLOCK) { 1963 if (targs.agbno == NULLAGBLOCK) {
1953 if (!(flags & XFS_ALLOC_FLAG_FREEING)) { 1964 if (flags & XFS_ALLOC_FLAG_FREEING)
1954 xfs_trans_brelse(tp, agflbp); 1965 break;
1955 args->agbp = NULL; 1966 xfs_trans_brelse(tp, agflbp);
1956 return 0; 1967 args->agbp = NULL;
1957 } 1968 return 0;
1958 break;
1959 } 1969 }
1960 /* 1970 /*
1961 * Put each allocated block on the list. 1971 * Put each allocated block on the list.
@@ -2442,31 +2452,26 @@ xfs_free_extent(
2442 xfs_fsblock_t bno, /* starting block number of extent */ 2452 xfs_fsblock_t bno, /* starting block number of extent */
2443 xfs_extlen_t len) /* length of extent */ 2453 xfs_extlen_t len) /* length of extent */
2444{ 2454{
2445#ifdef DEBUG 2455 xfs_alloc_arg_t args;
2446 xfs_agf_t *agf; /* a.g. freespace header */
2447#endif
2448 xfs_alloc_arg_t args; /* allocation argument structure */
2449 int error; 2456 int error;
2450 2457
2451 ASSERT(len != 0); 2458 ASSERT(len != 0);
2459 memset(&args, 0, sizeof(xfs_alloc_arg_t));
2452 args.tp = tp; 2460 args.tp = tp;
2453 args.mp = tp->t_mountp; 2461 args.mp = tp->t_mountp;
2454 args.agno = XFS_FSB_TO_AGNO(args.mp, bno); 2462 args.agno = XFS_FSB_TO_AGNO(args.mp, bno);
2455 ASSERT(args.agno < args.mp->m_sb.sb_agcount); 2463 ASSERT(args.agno < args.mp->m_sb.sb_agcount);
2456 args.agbno = XFS_FSB_TO_AGBNO(args.mp, bno); 2464 args.agbno = XFS_FSB_TO_AGBNO(args.mp, bno);
2457 args.alignment = 1;
2458 args.minlen = args.minleft = args.minalignslop = 0;
2459 down_read(&args.mp->m_peraglock); 2465 down_read(&args.mp->m_peraglock);
2460 args.pag = &args.mp->m_perag[args.agno]; 2466 args.pag = &args.mp->m_perag[args.agno];
2461 if ((error = xfs_alloc_fix_freelist(&args, XFS_ALLOC_FLAG_FREEING))) 2467 if ((error = xfs_alloc_fix_freelist(&args, XFS_ALLOC_FLAG_FREEING)))
2462 goto error0; 2468 goto error0;
2463#ifdef DEBUG 2469#ifdef DEBUG
2464 ASSERT(args.agbp != NULL); 2470 ASSERT(args.agbp != NULL);
2465 agf = XFS_BUF_TO_AGF(args.agbp); 2471 ASSERT((args.agbno + len) <=
2466 ASSERT(args.agbno + len <= be32_to_cpu(agf->agf_length)); 2472 be32_to_cpu(XFS_BUF_TO_AGF(args.agbp)->agf_length));
2467#endif 2473#endif
2468 error = xfs_free_ag_extent(tp, args.agbp, args.agno, args.agbno, 2474 error = xfs_free_ag_extent(tp, args.agbp, args.agno, args.agbno, len, 0);
2469 len, 0);
2470error0: 2475error0:
2471 up_read(&args.mp->m_peraglock); 2476 up_read(&args.mp->m_peraglock);
2472 return error; 2477 return error;
diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
index 3a613753906..bf46fae303a 100644
--- a/fs/xfs/xfs_bmap.c
+++ b/fs/xfs/xfs_bmap.c
@@ -4993,7 +4993,7 @@ xfs_bmapi(
4993 bma.firstblock = *firstblock; 4993 bma.firstblock = *firstblock;
4994 bma.alen = alen; 4994 bma.alen = alen;
4995 bma.off = aoff; 4995 bma.off = aoff;
4996 bma.conv = (flags & XFS_BMAPI_CONVERT); 4996 bma.conv = !!(flags & XFS_BMAPI_CONVERT);
4997 bma.wasdel = wasdelay; 4997 bma.wasdel = wasdelay;
4998 bma.minlen = minlen; 4998 bma.minlen = minlen;
4999 bma.low = flist->xbf_low; 4999 bma.low = flist->xbf_low;
diff --git a/include/asm-arm/arch-pxa/ssp.h b/include/asm-arm/arch-pxa/ssp.h
index 949878c0d90..ea200551a75 100644
--- a/include/asm-arm/arch-pxa/ssp.h
+++ b/include/asm-arm/arch-pxa/ssp.h
@@ -40,8 +40,8 @@ struct ssp_dev {
40}; 40};
41 41
42int ssp_write_word(struct ssp_dev *dev, u32 data); 42int ssp_write_word(struct ssp_dev *dev, u32 data);
43int ssp_read_word(struct ssp_dev *dev); 43int ssp_read_word(struct ssp_dev *dev, u32 *data);
44void ssp_flush(struct ssp_dev *dev); 44int ssp_flush(struct ssp_dev *dev);
45void ssp_enable(struct ssp_dev *dev); 45void ssp_enable(struct ssp_dev *dev);
46void ssp_disable(struct ssp_dev *dev); 46void ssp_disable(struct ssp_dev *dev);
47void ssp_save_state(struct ssp_dev *dev, struct ssp_state *ssp); 47void ssp_save_state(struct ssp_dev *dev, struct ssp_state *ssp);
diff --git a/include/asm-arm/arch-s3c2410/dma.h b/include/asm-arm/arch-s3c2410/dma.h
index 72964f9b841..7463fd5252c 100644
--- a/include/asm-arm/arch-s3c2410/dma.h
+++ b/include/asm-arm/arch-s3c2410/dma.h
@@ -104,6 +104,7 @@ enum s3c2410_chan_op_e {
104 S3C2410_DMAOP_RESUME, 104 S3C2410_DMAOP_RESUME,
105 S3C2410_DMAOP_FLUSH, 105 S3C2410_DMAOP_FLUSH,
106 S3C2410_DMAOP_TIMEOUT, /* internal signal to handler */ 106 S3C2410_DMAOP_TIMEOUT, /* internal signal to handler */
107 S3C2410_DMAOP_STARTED, /* indicate channel started */
107}; 108};
108 109
109typedef enum s3c2410_chan_op_e s3c2410_chan_op_t; 110typedef enum s3c2410_chan_op_e s3c2410_chan_op_t;
diff --git a/include/asm-arm/arch-s3c2410/regs-rtc.h b/include/asm-arm/arch-s3c2410/regs-rtc.h
index 228983f89bc..0fbec07bb6b 100644
--- a/include/asm-arm/arch-s3c2410/regs-rtc.h
+++ b/include/asm-arm/arch-s3c2410/regs-rtc.h
@@ -18,7 +18,7 @@
18#ifndef __ASM_ARCH_REGS_RTC_H 18#ifndef __ASM_ARCH_REGS_RTC_H
19#define __ASM_ARCH_REGS_RTC_H __FILE__ 19#define __ASM_ARCH_REGS_RTC_H __FILE__
20 20
21#define S3C2410_RTCREG(x) ((x) + S3C24XX_VA_RTC) 21#define S3C2410_RTCREG(x) (x)
22 22
23#define S3C2410_RTCCON S3C2410_RTCREG(0x40) 23#define S3C2410_RTCCON S3C2410_RTCREG(0x40)
24#define S3C2410_RTCCON_RTCEN (1<<0) 24#define S3C2410_RTCCON_RTCEN (1<<0)
diff --git a/include/asm-arm/hardware/ssp.h b/include/asm-arm/hardware/ssp.h
index 28aa11b769c..3b42e181997 100644
--- a/include/asm-arm/hardware/ssp.h
+++ b/include/asm-arm/hardware/ssp.h
@@ -16,8 +16,8 @@ struct ssp_state {
16}; 16};
17 17
18int ssp_write_word(u16 data); 18int ssp_write_word(u16 data);
19int ssp_read_word(void); 19int ssp_read_word(u16 *data);
20void ssp_flush(void); 20int ssp_flush(void);
21void ssp_enable(void); 21void ssp_enable(void);
22void ssp_disable(void); 22void ssp_disable(void);
23void ssp_save_state(struct ssp_state *ssp); 23void ssp_save_state(struct ssp_state *ssp);
diff --git a/include/asm-arm/io.h b/include/asm-arm/io.h
index b3479fc1cc8..bf7b9dea30f 100644
--- a/include/asm-arm/io.h
+++ b/include/asm-arm/io.h
@@ -291,5 +291,12 @@ extern void pci_iounmap(struct pci_dev *dev, void __iomem *addr);
291 */ 291 */
292#define xlate_dev_kmem_ptr(p) p 292#define xlate_dev_kmem_ptr(p) p
293 293
294/*
295 * Register ISA memory and port locations for glibc iopl/inb/outb
296 * emulation.
297 */
298extern void register_isa_ports(unsigned int mmio, unsigned int io,
299 unsigned int io_shift);
300
294#endif /* __KERNEL__ */ 301#endif /* __KERNEL__ */
295#endif /* __ASM_ARM_IO_H */ 302#endif /* __ASM_ARM_IO_H */
diff --git a/include/asm-arm/procinfo.h b/include/asm-arm/procinfo.h
index edb7b6502fc..91a31adfa8a 100644
--- a/include/asm-arm/procinfo.h
+++ b/include/asm-arm/procinfo.h
@@ -55,5 +55,6 @@ extern unsigned int elf_hwcap;
55#define HWCAP_VFP 64 55#define HWCAP_VFP 64
56#define HWCAP_EDSP 128 56#define HWCAP_EDSP 128
57#define HWCAP_JAVA 256 57#define HWCAP_JAVA 256
58#define HWCAP_IWMMXT 512
58 59
59#endif 60#endif
diff --git a/include/asm-i386/alternative.h b/include/asm-i386/alternative.h
index 96adbabec74..b01a7ec409c 100644
--- a/include/asm-i386/alternative.h
+++ b/include/asm-i386/alternative.h
@@ -88,9 +88,6 @@ static inline void alternatives_smp_switch(int smp) {}
88/* 88/*
89 * Alternative inline assembly for SMP. 89 * Alternative inline assembly for SMP.
90 * 90 *
91 * alternative_smp() takes two versions (SMP first, UP second) and is
92 * for more complex stuff such as spinlocks.
93 *
94 * The LOCK_PREFIX macro defined here replaces the LOCK and 91 * The LOCK_PREFIX macro defined here replaces the LOCK and
95 * LOCK_PREFIX macros used everywhere in the source tree. 92 * LOCK_PREFIX macros used everywhere in the source tree.
96 * 93 *
@@ -110,21 +107,6 @@ static inline void alternatives_smp_switch(int smp) {}
110 */ 107 */
111 108
112#ifdef CONFIG_SMP 109#ifdef CONFIG_SMP
113#define alternative_smp(smpinstr, upinstr, args...) \
114 asm volatile ("661:\n\t" smpinstr "\n662:\n" \
115 ".section .smp_altinstructions,\"a\"\n" \
116 " .align 4\n" \
117 " .long 661b\n" /* label */ \
118 " .long 663f\n" /* new instruction */ \
119 " .byte 0x68\n" /* X86_FEATURE_UP */ \
120 " .byte 662b-661b\n" /* sourcelen */ \
121 " .byte 664f-663f\n" /* replacementlen */ \
122 ".previous\n" \
123 ".section .smp_altinstr_replacement,\"awx\"\n" \
124 "663:\n\t" upinstr "\n" /* replacement */ \
125 "664:\n\t.fill 662b-661b,1,0x42\n" /* space for original */ \
126 ".previous" : args)
127
128#define LOCK_PREFIX \ 110#define LOCK_PREFIX \
129 ".section .smp_locks,\"a\"\n" \ 111 ".section .smp_locks,\"a\"\n" \
130 " .align 4\n" \ 112 " .align 4\n" \
@@ -133,8 +115,6 @@ static inline void alternatives_smp_switch(int smp) {}
133 "661:\n\tlock; " 115 "661:\n\tlock; "
134 116
135#else /* ! CONFIG_SMP */ 117#else /* ! CONFIG_SMP */
136#define alternative_smp(smpinstr, upinstr, args...) \
137 asm volatile (upinstr : args)
138#define LOCK_PREFIX "" 118#define LOCK_PREFIX ""
139#endif 119#endif
140 120
diff --git a/include/asm-i386/mmzone.h b/include/asm-i386/mmzone.h
index e33e9f9e4c6..22cb07cc8f3 100644
--- a/include/asm-i386/mmzone.h
+++ b/include/asm-i386/mmzone.h
@@ -14,7 +14,7 @@ extern struct pglist_data *node_data[];
14 14
15#ifdef CONFIG_X86_NUMAQ 15#ifdef CONFIG_X86_NUMAQ
16 #include <asm/numaq.h> 16 #include <asm/numaq.h>
17#else /* summit or generic arch */ 17#elif defined(CONFIG_ACPI_SRAT)/* summit or generic arch */
18 #include <asm/srat.h> 18 #include <asm/srat.h>
19#endif 19#endif
20 20
diff --git a/include/asm-i386/rwlock.h b/include/asm-i386/rwlock.h
index 96b0bef2ea5..3ac1ba98b1b 100644
--- a/include/asm-i386/rwlock.h
+++ b/include/asm-i386/rwlock.h
@@ -21,22 +21,20 @@
21#define RW_LOCK_BIAS_STR "0x01000000" 21#define RW_LOCK_BIAS_STR "0x01000000"
22 22
23#define __build_read_lock_ptr(rw, helper) \ 23#define __build_read_lock_ptr(rw, helper) \
24 alternative_smp("lock; subl $1,(%0)\n\t" \ 24 asm volatile(LOCK_PREFIX " ; subl $1,(%0)\n\t" \
25 "jns 1f\n" \ 25 "jns 1f\n" \
26 "call " helper "\n\t" \ 26 "call " helper "\n\t" \
27 "1:\n", \ 27 "1:\n" \
28 "subl $1,(%0)\n\t", \
29 :"a" (rw) : "memory") 28 :"a" (rw) : "memory")
30 29
31#define __build_read_lock_const(rw, helper) \ 30#define __build_read_lock_const(rw, helper) \
32 alternative_smp("lock; subl $1,%0\n\t" \ 31 asm volatile(LOCK_PREFIX " ; subl $1,%0\n\t" \
33 "jns 1f\n" \ 32 "jns 1f\n" \
34 "pushl %%eax\n\t" \ 33 "pushl %%eax\n\t" \
35 "leal %0,%%eax\n\t" \ 34 "leal %0,%%eax\n\t" \
36 "call " helper "\n\t" \ 35 "call " helper "\n\t" \
37 "popl %%eax\n\t" \ 36 "popl %%eax\n\t" \
38 "1:\n", \ 37 "1:\n" : \
39 "subl $1,%0\n\t", \
40 "+m" (*(volatile int *)rw) : : "memory") 38 "+m" (*(volatile int *)rw) : : "memory")
41 39
42#define __build_read_lock(rw, helper) do { \ 40#define __build_read_lock(rw, helper) do { \
@@ -47,7 +45,7 @@
47 } while (0) 45 } while (0)
48 46
49#define __build_write_lock_ptr(rw, helper) \ 47#define __build_write_lock_ptr(rw, helper) \
50 alternative_smp("lock; subl $" RW_LOCK_BIAS_STR ",(%0)\n\t" \ 48 asm volatile(LOCK_PREFIX " ; subl $" RW_LOCK_BIAS_STR ",(%0)\n\t" \
51 "jz 1f\n" \ 49 "jz 1f\n" \
52 "call " helper "\n\t" \ 50 "call " helper "\n\t" \
53 "1:\n", \ 51 "1:\n", \
@@ -55,7 +53,7 @@
55 :"a" (rw) : "memory") 53 :"a" (rw) : "memory")
56 54
57#define __build_write_lock_const(rw, helper) \ 55#define __build_write_lock_const(rw, helper) \
58 alternative_smp("lock; subl $" RW_LOCK_BIAS_STR ",%0\n\t" \ 56 asm volatile(LOCK_PREFIX " ; subl $" RW_LOCK_BIAS_STR ",%0\n\t" \
59 "jz 1f\n" \ 57 "jz 1f\n" \
60 "pushl %%eax\n\t" \ 58 "pushl %%eax\n\t" \
61 "leal %0,%%eax\n\t" \ 59 "leal %0,%%eax\n\t" \
diff --git a/include/asm-i386/spinlock.h b/include/asm-i386/spinlock.h
index d816c62a7a1..d1020363c41 100644
--- a/include/asm-i386/spinlock.h
+++ b/include/asm-i386/spinlock.h
@@ -22,7 +22,7 @@
22 22
23#define __raw_spin_lock_string \ 23#define __raw_spin_lock_string \
24 "\n1:\t" \ 24 "\n1:\t" \
25 "lock ; decb %0\n\t" \ 25 LOCK_PREFIX " ; decb %0\n\t" \
26 "jns 3f\n" \ 26 "jns 3f\n" \
27 "2:\t" \ 27 "2:\t" \
28 "rep;nop\n\t" \ 28 "rep;nop\n\t" \
@@ -38,7 +38,7 @@
38 */ 38 */
39#define __raw_spin_lock_string_flags \ 39#define __raw_spin_lock_string_flags \
40 "\n1:\t" \ 40 "\n1:\t" \
41 "lock ; decb %0\n\t" \ 41 LOCK_PREFIX " ; decb %0\n\t" \
42 "jns 5f\n" \ 42 "jns 5f\n" \
43 "2:\t" \ 43 "2:\t" \
44 "testl $0x200, %1\n\t" \ 44 "testl $0x200, %1\n\t" \
@@ -57,15 +57,9 @@
57 "jmp 4b\n" \ 57 "jmp 4b\n" \
58 "5:\n\t" 58 "5:\n\t"
59 59
60#define __raw_spin_lock_string_up \
61 "\n\tdecb %0"
62
63static inline void __raw_spin_lock(raw_spinlock_t *lock) 60static inline void __raw_spin_lock(raw_spinlock_t *lock)
64{ 61{
65 alternative_smp( 62 asm(__raw_spin_lock_string : "+m" (lock->slock) : : "memory");
66 __raw_spin_lock_string,
67 __raw_spin_lock_string_up,
68 "+m" (lock->slock) : : "memory");
69} 63}
70 64
71/* 65/*
@@ -76,10 +70,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
76#ifndef CONFIG_PROVE_LOCKING 70#ifndef CONFIG_PROVE_LOCKING
77static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags) 71static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
78{ 72{
79 alternative_smp( 73 asm(__raw_spin_lock_string_flags : "+m" (lock->slock) : "r" (flags) : "memory");
80 __raw_spin_lock_string_flags,
81 __raw_spin_lock_string_up,
82 "+m" (lock->slock) : "r" (flags) : "memory");
83} 74}
84#endif 75#endif
85 76
diff --git a/include/asm-i386/unistd.h b/include/asm-i386/unistd.h
index fc1c8ddae14..d983b74e4d9 100644
--- a/include/asm-i386/unistd.h
+++ b/include/asm-i386/unistd.h
@@ -324,8 +324,6 @@
324#define __NR_vmsplice 316 324#define __NR_vmsplice 316
325#define __NR_move_pages 317 325#define __NR_move_pages 317
326 326
327#ifdef __KERNEL__
328
329#define NR_syscalls 318 327#define NR_syscalls 318
330 328
331/* 329/*
@@ -425,6 +423,8 @@ __asm__ volatile ("push %%ebp ; push %%ebx ; movl 4(%2),%%ebp ; " \
425__syscall_return(type,__res); \ 423__syscall_return(type,__res); \
426} 424}
427 425
426#ifdef __KERNEL__
427
428#define __ARCH_WANT_IPC_PARSE_VERSION 428#define __ARCH_WANT_IPC_PARSE_VERSION
429#define __ARCH_WANT_OLD_READDIR 429#define __ARCH_WANT_OLD_READDIR
430#define __ARCH_WANT_OLD_STAT 430#define __ARCH_WANT_OLD_STAT
diff --git a/include/asm-i386/unwind.h b/include/asm-i386/unwind.h
index 69f0f1df672..4c1a0b96856 100644
--- a/include/asm-i386/unwind.h
+++ b/include/asm-i386/unwind.h
@@ -87,6 +87,7 @@ static inline int arch_unw_user_mode(const struct unwind_frame_info *info)
87#else 87#else
88 88
89#define UNW_PC(frame) ((void)(frame), 0) 89#define UNW_PC(frame) ((void)(frame), 0)
90#define UNW_SP(frame) ((void)(frame), 0)
90 91
91static inline int arch_unw_user_mode(const void *info) 92static inline int arch_unw_user_mode(const void *info)
92{ 93{
diff --git a/include/asm-ia64/sn/sn_sal.h b/include/asm-ia64/sn/sn_sal.h
index bd4452bda35..ba826b3f75b 100644
--- a/include/asm-ia64/sn/sn_sal.h
+++ b/include/asm-ia64/sn/sn_sal.h
@@ -706,12 +706,9 @@ static inline int
706sn_change_memprotect(u64 paddr, u64 len, u64 perms, u64 *nasid_array) 706sn_change_memprotect(u64 paddr, u64 len, u64 perms, u64 *nasid_array)
707{ 707{
708 struct ia64_sal_retval ret_stuff; 708 struct ia64_sal_retval ret_stuff;
709 unsigned long irq_flags;
710 709
711 local_irq_save(irq_flags);
712 ia64_sal_oemcall_nolock(&ret_stuff, SN_SAL_MEMPROTECT, paddr, len, 710 ia64_sal_oemcall_nolock(&ret_stuff, SN_SAL_MEMPROTECT, paddr, len,
713 (u64)nasid_array, perms, 0, 0, 0); 711 (u64)nasid_array, perms, 0, 0, 0);
714 local_irq_restore(irq_flags);
715 return ret_stuff.status; 712 return ret_stuff.status;
716} 713}
717#define SN_MEMPROT_ACCESS_CLASS_0 0x14a080 714#define SN_MEMPROT_ACCESS_CLASS_0 0x14a080
@@ -1143,12 +1140,9 @@ static inline int
1143sn_inject_error(u64 paddr, u64 *data, u64 *ecc) 1140sn_inject_error(u64 paddr, u64 *data, u64 *ecc)
1144{ 1141{
1145 struct ia64_sal_retval ret_stuff; 1142 struct ia64_sal_retval ret_stuff;
1146 unsigned long irq_flags;
1147 1143
1148 local_irq_save(irq_flags);
1149 ia64_sal_oemcall_nolock(&ret_stuff, SN_SAL_INJECT_ERROR, paddr, (u64)data, 1144 ia64_sal_oemcall_nolock(&ret_stuff, SN_SAL_INJECT_ERROR, paddr, (u64)data,
1150 (u64)ecc, 0, 0, 0, 0); 1145 (u64)ecc, 0, 0, 0, 0);
1151 local_irq_restore(irq_flags);
1152 return ret_stuff.status; 1146 return ret_stuff.status;
1153} 1147}
1154 1148
diff --git a/include/asm-ia64/sn/xp.h b/include/asm-ia64/sn/xp.h
index 9bd2f9bf329..6f807e0193b 100644
--- a/include/asm-ia64/sn/xp.h
+++ b/include/asm-ia64/sn/xp.h
@@ -60,23 +60,37 @@
60 * the bte_copy() once in the hope that the failure was due to a temporary 60 * the bte_copy() once in the hope that the failure was due to a temporary
61 * aberration (i.e., the link going down temporarily). 61 * aberration (i.e., the link going down temporarily).
62 * 62 *
63 * See bte_copy for definition of the input parameters. 63 * src - physical address of the source of the transfer.
64 * vdst - virtual address of the destination of the transfer.
65 * len - number of bytes to transfer from source to destination.
66 * mode - see bte_copy() for definition.
67 * notification - see bte_copy() for definition.
64 * 68 *
65 * Note: xp_bte_copy() should never be called while holding a spinlock. 69 * Note: xp_bte_copy() should never be called while holding a spinlock.
66 */ 70 */
67static inline bte_result_t 71static inline bte_result_t
68xp_bte_copy(u64 src, u64 dest, u64 len, u64 mode, void *notification) 72xp_bte_copy(u64 src, u64 vdst, u64 len, u64 mode, void *notification)
69{ 73{
70 bte_result_t ret; 74 bte_result_t ret;
75 u64 pdst = ia64_tpa(vdst);
71 76
72 77
73 ret = bte_copy(src, dest, len, mode, notification); 78 /*
79 * Ensure that the physically mapped memory is contiguous.
80 *
81 * We do this by ensuring that the memory is from region 7 only.
82 * If the need should arise to use memory from one of the other
83 * regions, then modify the BUG_ON() statement to ensure that the
84 * memory from that region is always physically contiguous.
85 */
86 BUG_ON(REGION_NUMBER(vdst) != RGN_KERNEL);
74 87
88 ret = bte_copy(src, pdst, len, mode, notification);
75 if (ret != BTE_SUCCESS) { 89 if (ret != BTE_SUCCESS) {
76 if (!in_interrupt()) { 90 if (!in_interrupt()) {
77 cond_resched(); 91 cond_resched();
78 } 92 }
79 ret = bte_copy(src, dest, len, mode, notification); 93 ret = bte_copy(src, pdst, len, mode, notification);
80 } 94 }
81 95
82 return ret; 96 return ret;
diff --git a/include/asm-ia64/sn/xpc.h b/include/asm-ia64/sn/xpc.h
index b72af597878..35e1386f37a 100644
--- a/include/asm-ia64/sn/xpc.h
+++ b/include/asm-ia64/sn/xpc.h
@@ -683,7 +683,9 @@ extern struct xpc_vars *xpc_vars;
683extern struct xpc_rsvd_page *xpc_rsvd_page; 683extern struct xpc_rsvd_page *xpc_rsvd_page;
684extern struct xpc_vars_part *xpc_vars_part; 684extern struct xpc_vars_part *xpc_vars_part;
685extern struct xpc_partition xpc_partitions[XP_MAX_PARTITIONS + 1]; 685extern struct xpc_partition xpc_partitions[XP_MAX_PARTITIONS + 1];
686extern char xpc_remote_copy_buffer[]; 686extern char *xpc_remote_copy_buffer;
687extern void *xpc_remote_copy_buffer_base;
688extern void *xpc_kmalloc_cacheline_aligned(size_t, gfp_t, void **);
687extern struct xpc_rsvd_page *xpc_rsvd_page_init(void); 689extern struct xpc_rsvd_page *xpc_rsvd_page_init(void);
688extern void xpc_allow_IPI_ops(void); 690extern void xpc_allow_IPI_ops(void);
689extern void xpc_restrict_IPI_ops(void); 691extern void xpc_restrict_IPI_ops(void);
diff --git a/include/asm-powerpc/io.h b/include/asm-powerpc/io.h
index a9496f34b04..36c4c34bf56 100644
--- a/include/asm-powerpc/io.h
+++ b/include/asm-powerpc/io.h
@@ -72,6 +72,9 @@ extern unsigned long pci_io_base;
72 * Neither do the standard versions now, these are just here 72 * Neither do the standard versions now, these are just here
73 * for older code. 73 * for older code.
74 */ 74 */
75#define insb(port, buf, ns) _insb((u8 __iomem *)((port)+pci_io_base), (buf), (ns))
76#define insw(port, buf, ns) _insw_ns((u8 __iomem *)((port)+pci_io_base), (buf), (ns))
77#define insl(port, buf, nl) _insl_ns((u8 __iomem *)((port)+pci_io_base), (buf), (nl))
75#define insw_ns(port, buf, ns) _insw_ns((u16 __iomem *)((port)+pci_io_base), (buf), (ns)) 78#define insw_ns(port, buf, ns) _insw_ns((u16 __iomem *)((port)+pci_io_base), (buf), (ns))
76#define insl_ns(port, buf, nl) _insl_ns((u32 __iomem *)((port)+pci_io_base), (buf), (nl)) 79#define insl_ns(port, buf, nl) _insl_ns((u32 __iomem *)((port)+pci_io_base), (buf), (nl))
77#else 80#else
@@ -137,12 +140,12 @@ static inline void __raw_writeq(unsigned long v, volatile void __iomem *addr)
137#define insw_ns(port, buf, ns) eeh_insw_ns((port), (buf), (ns)) 140#define insw_ns(port, buf, ns) eeh_insw_ns((port), (buf), (ns))
138#define insl_ns(port, buf, nl) eeh_insl_ns((port), (buf), (nl)) 141#define insl_ns(port, buf, nl) eeh_insl_ns((port), (buf), (nl))
139 142
143#endif
144
140#define outsb(port, buf, ns) _outsb((u8 __iomem *)((port)+pci_io_base), (buf), (ns)) 145#define outsb(port, buf, ns) _outsb((u8 __iomem *)((port)+pci_io_base), (buf), (ns))
141#define outsw(port, buf, ns) _outsw_ns((u16 __iomem *)((port)+pci_io_base), (buf), (ns)) 146#define outsw(port, buf, ns) _outsw_ns((u16 __iomem *)((port)+pci_io_base), (buf), (ns))
142#define outsl(port, buf, nl) _outsl_ns((u32 __iomem *)((port)+pci_io_base), (buf), (nl)) 147#define outsl(port, buf, nl) _outsl_ns((u32 __iomem *)((port)+pci_io_base), (buf), (nl))
143 148
144#endif
145
146#define readb_relaxed(addr) readb(addr) 149#define readb_relaxed(addr) readb(addr)
147#define readw_relaxed(addr) readw(addr) 150#define readw_relaxed(addr) readw(addr)
148#define readl_relaxed(addr) readl(addr) 151#define readl_relaxed(addr) readl(addr)
diff --git a/include/asm-powerpc/ipic.h b/include/asm-powerpc/ipic.h
index 0fe396a2b66..53079ec3a51 100644
--- a/include/asm-powerpc/ipic.h
+++ b/include/asm-powerpc/ipic.h
@@ -69,9 +69,6 @@ enum ipic_mcp_irq {
69 IPIC_MCP_MU = 7, 69 IPIC_MCP_MU = 7,
70}; 70};
71 71
72extern void ipic_init(phys_addr_t phys_addr, unsigned int flags,
73 unsigned int irq_offset,
74 unsigned char *senses, unsigned int senses_count);
75extern int ipic_set_priority(unsigned int irq, unsigned int priority); 72extern int ipic_set_priority(unsigned int irq, unsigned int priority);
76extern void ipic_set_highest_priority(unsigned int irq); 73extern void ipic_set_highest_priority(unsigned int irq);
77extern void ipic_set_default_priority(void); 74extern void ipic_set_default_priority(void);
@@ -79,7 +76,16 @@ extern void ipic_enable_mcp(enum ipic_mcp_irq mcp_irq);
79extern void ipic_disable_mcp(enum ipic_mcp_irq mcp_irq); 76extern void ipic_disable_mcp(enum ipic_mcp_irq mcp_irq);
80extern u32 ipic_get_mcp_status(void); 77extern u32 ipic_get_mcp_status(void);
81extern void ipic_clear_mcp_status(u32 mask); 78extern void ipic_clear_mcp_status(u32 mask);
79
80#ifdef CONFIG_PPC_MERGE
81extern void ipic_init(struct device_node *node, unsigned int flags);
82extern unsigned int ipic_get_irq(struct pt_regs *regs);
83#else
84extern void ipic_init(phys_addr_t phys_addr, unsigned int flags,
85 unsigned int irq_offset,
86 unsigned char *senses, unsigned int senses_count);
82extern int ipic_get_irq(struct pt_regs *regs); 87extern int ipic_get_irq(struct pt_regs *regs);
88#endif
83 89
84#endif /* __ASM_IPIC_H__ */ 90#endif /* __ASM_IPIC_H__ */
85#endif /* __KERNEL__ */ 91#endif /* __KERNEL__ */
diff --git a/include/asm-powerpc/mpic.h b/include/asm-powerpc/mpic.h
index eb241c99c45..a9f9604b9ef 100644
--- a/include/asm-powerpc/mpic.h
+++ b/include/asm-powerpc/mpic.h
@@ -41,6 +41,7 @@
41#define MPIC_GREG_IPI_VECTOR_PRI_1 0x000b0 41#define MPIC_GREG_IPI_VECTOR_PRI_1 0x000b0
42#define MPIC_GREG_IPI_VECTOR_PRI_2 0x000c0 42#define MPIC_GREG_IPI_VECTOR_PRI_2 0x000c0
43#define MPIC_GREG_IPI_VECTOR_PRI_3 0x000d0 43#define MPIC_GREG_IPI_VECTOR_PRI_3 0x000d0
44#define MPIC_GREG_IPI_STRIDE 0x10
44#define MPIC_GREG_SPURIOUS 0x000e0 45#define MPIC_GREG_SPURIOUS 0x000e0
45#define MPIC_GREG_TIMER_FREQ 0x000f0 46#define MPIC_GREG_TIMER_FREQ 0x000f0
46 47
@@ -68,6 +69,7 @@
68#define MPIC_CPU_IPI_DISPATCH_1 0x00050 69#define MPIC_CPU_IPI_DISPATCH_1 0x00050
69#define MPIC_CPU_IPI_DISPATCH_2 0x00060 70#define MPIC_CPU_IPI_DISPATCH_2 0x00060
70#define MPIC_CPU_IPI_DISPATCH_3 0x00070 71#define MPIC_CPU_IPI_DISPATCH_3 0x00070
72#define MPIC_CPU_IPI_DISPATCH_STRIDE 0x00010
71#define MPIC_CPU_CURRENT_TASK_PRI 0x00080 73#define MPIC_CPU_CURRENT_TASK_PRI 0x00080
72#define MPIC_CPU_TASKPRI_MASK 0x0000000f 74#define MPIC_CPU_TASKPRI_MASK 0x0000000f
73#define MPIC_CPU_WHOAMI 0x00090 75#define MPIC_CPU_WHOAMI 0x00090
@@ -114,6 +116,103 @@
114#define MPIC_VEC_TIMER_1 248 116#define MPIC_VEC_TIMER_1 248
115#define MPIC_VEC_TIMER_0 247 117#define MPIC_VEC_TIMER_0 247
116 118
119/*
120 * Tsi108 implementation of MPIC has many differences from the original one
121 */
122
123/*
124 * Global registers
125 */
126
127#define TSI108_GREG_BASE 0x00000
128#define TSI108_GREG_FEATURE_0 0x00000
129#define TSI108_GREG_GLOBAL_CONF_0 0x00004
130#define TSI108_GREG_VENDOR_ID 0x0000c
131#define TSI108_GREG_IPI_VECTOR_PRI_0 0x00204 /* Doorbell 0 */
132#define TSI108_GREG_IPI_STRIDE 0x0c
133#define TSI108_GREG_SPURIOUS 0x00010
134#define TSI108_GREG_TIMER_FREQ 0x00014
135
136/*
137 * Timer registers
138 */
139#define TSI108_TIMER_BASE 0x0030
140#define TSI108_TIMER_STRIDE 0x10
141#define TSI108_TIMER_CURRENT_CNT 0x00000
142#define TSI108_TIMER_BASE_CNT 0x00004
143#define TSI108_TIMER_VECTOR_PRI 0x00008
144#define TSI108_TIMER_DESTINATION 0x0000c
145
146/*
147 * Per-Processor registers
148 */
149#define TSI108_CPU_BASE 0x00300
150#define TSI108_CPU_STRIDE 0x00040
151#define TSI108_CPU_IPI_DISPATCH_0 0x00200
152#define TSI108_CPU_IPI_DISPATCH_STRIDE 0x00000
153#define TSI108_CPU_CURRENT_TASK_PRI 0x00000
154#define TSI108_CPU_WHOAMI 0xffffffff
155#define TSI108_CPU_INTACK 0x00004
156#define TSI108_CPU_EOI 0x00008
157
158/*
159 * Per-source registers
160 */
161#define TSI108_IRQ_BASE 0x00100
162#define TSI108_IRQ_STRIDE 0x00008
163#define TSI108_IRQ_VECTOR_PRI 0x00000
164#define TSI108_VECPRI_VECTOR_MASK 0x000000ff
165#define TSI108_VECPRI_POLARITY_POSITIVE 0x01000000
166#define TSI108_VECPRI_POLARITY_NEGATIVE 0x00000000
167#define TSI108_VECPRI_SENSE_LEVEL 0x02000000
168#define TSI108_VECPRI_SENSE_EDGE 0x00000000
169#define TSI108_VECPRI_POLARITY_MASK 0x01000000
170#define TSI108_VECPRI_SENSE_MASK 0x02000000
171#define TSI108_IRQ_DESTINATION 0x00004
172
173/* weird mpic register indices and mask bits in the HW info array */
174enum {
175 MPIC_IDX_GREG_BASE = 0,
176 MPIC_IDX_GREG_FEATURE_0,
177 MPIC_IDX_GREG_GLOBAL_CONF_0,
178 MPIC_IDX_GREG_VENDOR_ID,
179 MPIC_IDX_GREG_IPI_VECTOR_PRI_0,
180 MPIC_IDX_GREG_IPI_STRIDE,
181 MPIC_IDX_GREG_SPURIOUS,
182 MPIC_IDX_GREG_TIMER_FREQ,
183
184 MPIC_IDX_TIMER_BASE,
185 MPIC_IDX_TIMER_STRIDE,
186 MPIC_IDX_TIMER_CURRENT_CNT,
187 MPIC_IDX_TIMER_BASE_CNT,
188 MPIC_IDX_TIMER_VECTOR_PRI,
189 MPIC_IDX_TIMER_DESTINATION,
190
191 MPIC_IDX_CPU_BASE,
192 MPIC_IDX_CPU_STRIDE,
193 MPIC_IDX_CPU_IPI_DISPATCH_0,
194 MPIC_IDX_CPU_IPI_DISPATCH_STRIDE,
195 MPIC_IDX_CPU_CURRENT_TASK_PRI,
196 MPIC_IDX_CPU_WHOAMI,
197 MPIC_IDX_CPU_INTACK,
198 MPIC_IDX_CPU_EOI,
199
200 MPIC_IDX_IRQ_BASE,
201 MPIC_IDX_IRQ_STRIDE,
202 MPIC_IDX_IRQ_VECTOR_PRI,
203
204 MPIC_IDX_VECPRI_VECTOR_MASK,
205 MPIC_IDX_VECPRI_POLARITY_POSITIVE,
206 MPIC_IDX_VECPRI_POLARITY_NEGATIVE,
207 MPIC_IDX_VECPRI_SENSE_LEVEL,
208 MPIC_IDX_VECPRI_SENSE_EDGE,
209 MPIC_IDX_VECPRI_POLARITY_MASK,
210 MPIC_IDX_VECPRI_SENSE_MASK,
211 MPIC_IDX_IRQ_DESTINATION,
212 MPIC_IDX_END
213};
214
215
117#ifdef CONFIG_MPIC_BROKEN_U3 216#ifdef CONFIG_MPIC_BROKEN_U3
118/* Fixup table entry */ 217/* Fixup table entry */
119struct mpic_irq_fixup 218struct mpic_irq_fixup
@@ -171,15 +270,29 @@ struct mpic
171 volatile u32 __iomem *cpuregs[MPIC_MAX_CPUS]; 270 volatile u32 __iomem *cpuregs[MPIC_MAX_CPUS];
172 volatile u32 __iomem *isus[MPIC_MAX_ISU]; 271 volatile u32 __iomem *isus[MPIC_MAX_ISU];
173 272
273#ifdef CONFIG_MPIC_WEIRD
274 /* Pointer to HW info array */
275 u32 *hw_set;
276#endif
277
174 /* link */ 278 /* link */
175 struct mpic *next; 279 struct mpic *next;
176}; 280};
177 281
282/*
283 * MPIC flags (passed to mpic_alloc)
284 *
285 * The top 4 bits contain an MPIC bhw id that is used to index the
286 * register offsets and some masks when CONFIG_MPIC_WEIRD is set.
287 * Note setting any ID (leaving those bits to 0) means standard MPIC
288 */
289
178/* This is the primary controller, only that one has IPIs and 290/* This is the primary controller, only that one has IPIs and
179 * has affinity control. A non-primary MPIC always uses CPU0 291 * has affinity control. A non-primary MPIC always uses CPU0
180 * registers only 292 * registers only
181 */ 293 */
182#define MPIC_PRIMARY 0x00000001 294#define MPIC_PRIMARY 0x00000001
295
183/* Set this for a big-endian MPIC */ 296/* Set this for a big-endian MPIC */
184#define MPIC_BIG_ENDIAN 0x00000002 297#define MPIC_BIG_ENDIAN 0x00000002
185/* Broken U3 MPIC */ 298/* Broken U3 MPIC */
@@ -188,6 +301,18 @@ struct mpic
188#define MPIC_BROKEN_IPI 0x00000008 301#define MPIC_BROKEN_IPI 0x00000008
189/* MPIC wants a reset */ 302/* MPIC wants a reset */
190#define MPIC_WANTS_RESET 0x00000010 303#define MPIC_WANTS_RESET 0x00000010
304/* Spurious vector requires EOI */
305#define MPIC_SPV_EOI 0x00000020
306/* No passthrough disable */
307#define MPIC_NO_PTHROU_DIS 0x00000040
308
309/* MPIC HW modification ID */
310#define MPIC_REGSET_MASK 0xf0000000
311#define MPIC_REGSET(val) (((val) & 0xf ) << 28)
312#define MPIC_GET_REGSET(flags) (((flags) >> 28) & 0xf)
313
314#define MPIC_REGSET_STANDARD MPIC_REGSET(0) /* Original MPIC */
315#define MPIC_REGSET_TSI108 MPIC_REGSET(1) /* Tsi108/109 PIC */
191 316
192/* Allocate the controller structure and setup the linux irq descs 317/* Allocate the controller structure and setup the linux irq descs
193 * for the range of interrupts passed in. No HW initialization is 318 * for the range of interrupts passed in. No HW initialization is
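
The MPIC_REGSET flag bits added above pack a register-set id into the top four bits of the flags word passed to mpic_alloc(). Below is a minimal standalone sketch, not part of the patch and not kernel code, showing how the encode/decode macros behave; the flag combination chosen for a Tsi108 board is hypothetical.

/* Standalone illustration of the new flag encoding: the top four bits of
 * the mpic_alloc() flags select a register set (0 = standard MPIC). */
#include <stdio.h>

#define MPIC_PRIMARY            0x00000001
#define MPIC_BIG_ENDIAN         0x00000002
#define MPIC_REGSET_MASK        0xf0000000
#define MPIC_REGSET(val)        (((val) & 0xf) << 28)
#define MPIC_GET_REGSET(flags)  (((flags) >> 28) & 0xf)

#define MPIC_REGSET_STANDARD    MPIC_REGSET(0)  /* Original MPIC */
#define MPIC_REGSET_TSI108      MPIC_REGSET(1)  /* Tsi108/109 PIC */

int main(void)
{
        /* Hypothetical flags a Tsi108 board might pass to mpic_alloc(). */
        unsigned int flags = MPIC_PRIMARY | MPIC_BIG_ENDIAN | MPIC_REGSET_TSI108;

        printf("regset id = %u\n", MPIC_GET_REGSET(flags));   /* prints 1 */
        printf("standard?   %s\n",
               (flags & MPIC_REGSET_MASK) == MPIC_REGSET_STANDARD ? "yes" : "no");
        return 0;
}
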
diff --git a/include/asm-powerpc/prom.h b/include/asm-powerpc/prom.h
index 7a457bd462a..c15e66a2e68 100644
--- a/include/asm-powerpc/prom.h
+++ b/include/asm-powerpc/prom.h
@@ -276,6 +276,7 @@ extern void of_irq_map_init(unsigned int flags);
276 * of_irq_map_raw - Low level interrupt tree parsing 276 * of_irq_map_raw - Low level interrupt tree parsing
277 * @parent: the device interrupt parent 277 * @parent: the device interrupt parent
278 * @intspec: interrupt specifier ("interrupts" property of the device) 278 * @intspec: interrupt specifier ("interrupts" property of the device)
279 * @ointsize: size of the passed in interrupt specifier
279 * @addr: address specifier (start of "reg" property of the device) 280 * @addr: address specifier (start of "reg" property of the device)
280 * @out_irq: structure of_irq filled by this function 281 * @out_irq: structure of_irq filled by this function
281 * 282 *
@@ -289,7 +290,8 @@ extern void of_irq_map_init(unsigned int flags);
289 */ 290 */
290 291
291extern int of_irq_map_raw(struct device_node *parent, const u32 *intspec, 292extern int of_irq_map_raw(struct device_node *parent, const u32 *intspec,
292 const u32 *addr, struct of_irq *out_irq); 293 u32 ointsize, const u32 *addr,
294 struct of_irq *out_irq);
293 295
294 296
295/** 297/**
diff --git a/include/asm-powerpc/time.h b/include/asm-powerpc/time.h
index dcde4410348..5785ac4737b 100644
--- a/include/asm-powerpc/time.h
+++ b/include/asm-powerpc/time.h
@@ -30,10 +30,6 @@ extern unsigned long tb_ticks_per_usec;
30extern unsigned long tb_ticks_per_sec; 30extern unsigned long tb_ticks_per_sec;
31extern u64 tb_to_xs; 31extern u64 tb_to_xs;
32extern unsigned tb_to_us; 32extern unsigned tb_to_us;
33extern unsigned long tb_last_stamp;
34extern u64 tb_last_jiffy;
35
36DECLARE_PER_CPU(unsigned long, last_jiffy);
37 33
38struct rtc_time; 34struct rtc_time;
39extern void to_tm(int tim, struct rtc_time * tm); 35extern void to_tm(int tim, struct rtc_time * tm);
diff --git a/include/asm-ppc/cpm2.h b/include/asm-ppc/cpm2.h
index c70344b9104..f6a7ff04ffe 100644
--- a/include/asm-ppc/cpm2.h
+++ b/include/asm-ppc/cpm2.h
@@ -1093,5 +1093,100 @@ typedef struct im_idma {
1093 1093
1094#define FCC_PSMR_RMII ((uint)0x00020000) /* Use RMII interface */ 1094#define FCC_PSMR_RMII ((uint)0x00020000) /* Use RMII interface */
1095 1095
1096/* FCC iop & clock configuration. BSP code is responsible for defining Fx_RXCLK & Fx_TXCLK
 1097 * in order to use the clock-computing macros below for FCC x
1098 */
1099
1100/* Automatically generates register configurations */
1101#define PC_CLK(x) ((uint)(1<<(x-1))) /* FCC CLK I/O ports */
1102
1103#define CMXFCR_RF1CS(x) ((uint)((x-5)<<27)) /* FCC1 Receive Clock Source */
1104#define CMXFCR_TF1CS(x) ((uint)((x-5)<<24)) /* FCC1 Transmit Clock Source */
1105#define CMXFCR_RF2CS(x) ((uint)((x-9)<<19)) /* FCC2 Receive Clock Source */
1106#define CMXFCR_TF2CS(x) ((uint)((x-9)<<16)) /* FCC2 Transmit Clock Source */
1107#define CMXFCR_RF3CS(x) ((uint)((x-9)<<11)) /* FCC3 Receive Clock Source */
1108#define CMXFCR_TF3CS(x) ((uint)((x-9)<<8)) /* FCC3 Transmit Clock Source */
1109
1110#define PC_F1RXCLK PC_CLK(F1_RXCLK)
1111#define PC_F1TXCLK PC_CLK(F1_TXCLK)
1112#define CMX1_CLK_ROUTE (CMXFCR_RF1CS(F1_RXCLK) | CMXFCR_TF1CS(F1_TXCLK))
1113#define CMX1_CLK_MASK ((uint)0xff000000)
1114
1115#define PC_F2RXCLK PC_CLK(F2_RXCLK)
1116#define PC_F2TXCLK PC_CLK(F2_TXCLK)
1117#define CMX2_CLK_ROUTE (CMXFCR_RF2CS(F2_RXCLK) | CMXFCR_TF2CS(F2_TXCLK))
1118#define CMX2_CLK_MASK ((uint)0x00ff0000)
1119
1120#define PC_F3RXCLK PC_CLK(F3_RXCLK)
1121#define PC_F3TXCLK PC_CLK(F3_TXCLK)
1122#define CMX3_CLK_ROUTE (CMXFCR_RF3CS(F3_RXCLK) | CMXFCR_TF3CS(F3_TXCLK))
1123#define CMX3_CLK_MASK ((uint)0x0000ff00)
1124
1125#define CPMUX_CLK_MASK (CMX3_CLK_MASK | CMX2_CLK_MASK)
1126#define CPMUX_CLK_ROUTE (CMX3_CLK_ROUTE | CMX2_CLK_ROUTE)
1127
1128#define CLK_TRX (PC_F3TXCLK | PC_F3RXCLK | PC_F2TXCLK | PC_F2RXCLK)
1129
1130/* I/O Pin assignment for FCC1. I don't yet know the best way to do this,
1131 * but there is little variation among the choices.
1132 */
1133#define PA1_COL 0x00000001U
1134#define PA1_CRS 0x00000002U
1135#define PA1_TXER 0x00000004U
1136#define PA1_TXEN 0x00000008U
1137#define PA1_RXDV 0x00000010U
1138#define PA1_RXER 0x00000020U
1139#define PA1_TXDAT 0x00003c00U
1140#define PA1_RXDAT 0x0003c000U
1141#define PA1_PSORA0 (PA1_RXDAT | PA1_TXDAT)
1142#define PA1_PSORA1 (PA1_COL | PA1_CRS | PA1_TXER | PA1_TXEN | \
1143 PA1_RXDV | PA1_RXER)
1144#define PA1_DIRA0 (PA1_RXDAT | PA1_CRS | PA1_COL | PA1_RXER | PA1_RXDV)
1145#define PA1_DIRA1 (PA1_TXDAT | PA1_TXEN | PA1_TXER)
1146
1147
1148/* I/O Pin assignment for FCC2. I don't yet know the best way to do this,
1149 * but there is little variation among the choices.
1150 */
1151#define PB2_TXER 0x00000001U
1152#define PB2_RXDV 0x00000002U
1153#define PB2_TXEN 0x00000004U
1154#define PB2_RXER 0x00000008U
1155#define PB2_COL 0x00000010U
1156#define PB2_CRS 0x00000020U
1157#define PB2_TXDAT 0x000003c0U
1158#define PB2_RXDAT 0x00003c00U
1159#define PB2_PSORB0 (PB2_RXDAT | PB2_TXDAT | PB2_CRS | PB2_COL | \
1160 PB2_RXER | PB2_RXDV | PB2_TXER)
1161#define PB2_PSORB1 (PB2_TXEN)
1162#define PB2_DIRB0 (PB2_RXDAT | PB2_CRS | PB2_COL | PB2_RXER | PB2_RXDV)
1163#define PB2_DIRB1 (PB2_TXDAT | PB2_TXEN | PB2_TXER)
1164
1165
1166/* I/O Pin assignment for FCC3. I don't yet know the best way to do this,
1167 * but there is little variation among the choices.
1168 */
1169#define PB3_RXDV 0x00004000U
1170#define PB3_RXER 0x00008000U
1171#define PB3_TXER 0x00010000U
1172#define PB3_TXEN 0x00020000U
1173#define PB3_COL 0x00040000U
1174#define PB3_CRS 0x00080000U
1175#define PB3_TXDAT 0x0f000000U
1176#define PC3_TXDAT 0x00000010U
1177#define PB3_RXDAT 0x00f00000U
1178#define PB3_PSORB0 (PB3_RXDAT | PB3_TXDAT | PB3_CRS | PB3_COL | \
1179 PB3_RXER | PB3_RXDV | PB3_TXER | PB3_TXEN)
1180#define PB3_PSORB1 0
1181#define PB3_DIRB0 (PB3_RXDAT | PB3_CRS | PB3_COL | PB3_RXER | PB3_RXDV)
1182#define PB3_DIRB1 (PB3_TXDAT | PB3_TXEN | PB3_TXER)
1183#define PC3_DIRC1 (PC3_TXDAT)
1184
1185/* Handy macro to specify mem for FCCs*/
1186#define FCC_MEM_OFFSET(x) (CPM_FCC_SPECIAL_BASE + (x*128))
1187#define FCC1_MEM_OFFSET FCC_MEM_OFFSET(0)
1188#define FCC2_MEM_OFFSET FCC_MEM_OFFSET(1)
1189#define FCC3_MEM_OFFSET FCC_MEM_OFFSET(2)
1190
1096#endif /* __CPM2__ */ 1191#endif /* __CPM2__ */
1097#endif /* __KERNEL__ */ 1192#endif /* __KERNEL__ */
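
The clock-routing macros added above turn a board's choice of CLK pins into CMXFCR field values and port-C bit masks. The following standalone sketch only illustrates the arithmetic of those macros; the F1_RXCLK and F1_TXCLK values are hypothetical BSP choices, and whether a given CLK pin is actually legal for FCC1 is silicon- and board-specific.

/* Standalone illustration (not kernel code) of the FCC1 clock macros. */
#include <stdio.h>

typedef unsigned int uint;

#define F1_RXCLK 11     /* hypothetical: FCC1 receive clock on CLK11 */
#define F1_TXCLK 10     /* hypothetical: FCC1 transmit clock on CLK10 */

#define PC_CLK(x)        ((uint)(1 << ((x) - 1)))    /* FCC CLK I/O port bit */
#define CMXFCR_RF1CS(x)  ((uint)(((x) - 5) << 27))   /* FCC1 receive clock source */
#define CMXFCR_TF1CS(x)  ((uint)(((x) - 5) << 24))   /* FCC1 transmit clock source */

#define PC_F1RXCLK       PC_CLK(F1_RXCLK)
#define PC_F1TXCLK       PC_CLK(F1_TXCLK)
#define CMX1_CLK_ROUTE   (CMXFCR_RF1CS(F1_RXCLK) | CMXFCR_TF1CS(F1_TXCLK))
#define CMX1_CLK_MASK    ((uint)0xff000000)

int main(void)
{
        printf("port-C clock bits: 0x%08x\n", PC_F1RXCLK | PC_F1TXCLK);
        printf("CMXFCR route:      0x%08x (within mask 0x%08x)\n",
               CMX1_CLK_ROUTE, CMX1_CLK_MASK);
        return 0;
}
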
diff --git a/include/asm-ppc/mpc8260.h b/include/asm-ppc/mpc8260.h
index 4b93481e767..23579d4afae 100644
--- a/include/asm-ppc/mpc8260.h
+++ b/include/asm-ppc/mpc8260.h
@@ -82,6 +82,7 @@ enum ppc_sys_devices {
82 MPC82xx_CPM_SMC2, 82 MPC82xx_CPM_SMC2,
83 MPC82xx_CPM_USB, 83 MPC82xx_CPM_USB,
84 MPC82xx_SEC1, 84 MPC82xx_SEC1,
85 MPC82xx_MDIO_BB,
85 NUM_PPC_SYS_DEVS, 86 NUM_PPC_SYS_DEVS,
86}; 87};
87 88
diff --git a/include/asm-ppc/mpc8xx.h b/include/asm-ppc/mpc8xx.h
index adcce33f20a..d3a2f2fe230 100644
--- a/include/asm-ppc/mpc8xx.h
+++ b/include/asm-ppc/mpc8xx.h
@@ -110,6 +110,7 @@ enum ppc_sys_devices {
110 MPC8xx_CPM_SMC1, 110 MPC8xx_CPM_SMC1,
111 MPC8xx_CPM_SMC2, 111 MPC8xx_CPM_SMC2,
112 MPC8xx_CPM_USB, 112 MPC8xx_CPM_USB,
113 MPC8xx_MDIO_FEC,
113 NUM_PPC_SYS_DEVS, 114 NUM_PPC_SYS_DEVS,
114}; 115};
115 116
diff --git a/include/asm-sparc64/pgtable.h b/include/asm-sparc64/pgtable.h
index 1ba19eb34ce..ebfe395cfb8 100644
--- a/include/asm-sparc64/pgtable.h
+++ b/include/asm-sparc64/pgtable.h
@@ -234,7 +234,7 @@ static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
234 sz_bits = 0UL; 234 sz_bits = 0UL;
235 if (_PAGE_SZBITS_4U != 0UL || _PAGE_SZBITS_4V != 0UL) { 235 if (_PAGE_SZBITS_4U != 0UL || _PAGE_SZBITS_4V != 0UL) {
236 __asm__ __volatile__( 236 __asm__ __volatile__(
237 "\n661: sethi %uhi(%1), %0\n" 237 "\n661: sethi %%uhi(%1), %0\n"
238 " sllx %0, 32, %0\n" 238 " sllx %0, 32, %0\n"
239 " .section .sun4v_2insn_patch, \"ax\"\n" 239 " .section .sun4v_2insn_patch, \"ax\"\n"
240 " .word 661b\n" 240 " .word 661b\n"
diff --git a/include/asm-x86_64/alternative.h b/include/asm-x86_64/alternative.h
index aa67bfd1b3c..a584826cc57 100644
--- a/include/asm-x86_64/alternative.h
+++ b/include/asm-x86_64/alternative.h
@@ -4,6 +4,7 @@
4#ifdef __KERNEL__ 4#ifdef __KERNEL__
5 5
6#include <linux/types.h> 6#include <linux/types.h>
7#include <asm/cpufeature.h>
7 8
8struct alt_instr { 9struct alt_instr {
9 u8 *instr; /* original instruction */ 10 u8 *instr; /* original instruction */
@@ -102,9 +103,6 @@ static inline void alternatives_smp_switch(int smp) {}
102/* 103/*
103 * Alternative inline assembly for SMP. 104 * Alternative inline assembly for SMP.
104 * 105 *
105 * alternative_smp() takes two versions (SMP first, UP second) and is
106 * for more complex stuff such as spinlocks.
107 *
108 * The LOCK_PREFIX macro defined here replaces the LOCK and 106 * The LOCK_PREFIX macro defined here replaces the LOCK and
109 * LOCK_PREFIX macros used everywhere in the source tree. 107 * LOCK_PREFIX macros used everywhere in the source tree.
110 * 108 *
@@ -124,21 +122,6 @@ static inline void alternatives_smp_switch(int smp) {}
124 */ 122 */
125 123
126#ifdef CONFIG_SMP 124#ifdef CONFIG_SMP
127#define alternative_smp(smpinstr, upinstr, args...) \
128 asm volatile ("661:\n\t" smpinstr "\n662:\n" \
129 ".section .smp_altinstructions,\"a\"\n" \
130 " .align 8\n" \
131 " .quad 661b\n" /* label */ \
132 " .quad 663f\n" /* new instruction */ \
133 " .byte 0x66\n" /* X86_FEATURE_UP */ \
134 " .byte 662b-661b\n" /* sourcelen */ \
135 " .byte 664f-663f\n" /* replacementlen */ \
136 ".previous\n" \
137 ".section .smp_altinstr_replacement,\"awx\"\n" \
138 "663:\n\t" upinstr "\n" /* replacement */ \
139 "664:\n\t.fill 662b-661b,1,0x42\n" /* space for original */ \
140 ".previous" : args)
141
142#define LOCK_PREFIX \ 125#define LOCK_PREFIX \
143 ".section .smp_locks,\"a\"\n" \ 126 ".section .smp_locks,\"a\"\n" \
144 " .align 8\n" \ 127 " .align 8\n" \
@@ -147,8 +130,6 @@ static inline void alternatives_smp_switch(int smp) {}
147 "661:\n\tlock; " 130 "661:\n\tlock; "
148 131
149#else /* ! CONFIG_SMP */ 132#else /* ! CONFIG_SMP */
150#define alternative_smp(smpinstr, upinstr, args...) \
151 asm volatile (upinstr : args)
152#define LOCK_PREFIX "" 133#define LOCK_PREFIX ""
153#endif 134#endif
154 135
diff --git a/include/asm-x86_64/processor.h b/include/asm-x86_64/processor.h
index 3b3c1217fe6..de9c3147ee4 100644
--- a/include/asm-x86_64/processor.h
+++ b/include/asm-x86_64/processor.h
@@ -232,8 +232,14 @@ struct tss_struct {
232 unsigned long io_bitmap[IO_BITMAP_LONGS + 1]; 232 unsigned long io_bitmap[IO_BITMAP_LONGS + 1];
233} __attribute__((packed)) ____cacheline_aligned; 233} __attribute__((packed)) ____cacheline_aligned;
234 234
235
235extern struct cpuinfo_x86 boot_cpu_data; 236extern struct cpuinfo_x86 boot_cpu_data;
236DECLARE_PER_CPU(struct tss_struct,init_tss); 237DECLARE_PER_CPU(struct tss_struct,init_tss);
238/* Save the original ist values for checking stack pointers during debugging */
239struct orig_ist {
240 unsigned long ist[7];
241};
242DECLARE_PER_CPU(struct orig_ist, orig_ist);
237 243
238#ifdef CONFIG_X86_VSMP 244#ifdef CONFIG_X86_VSMP
239#define ARCH_MIN_TASKALIGN (1 << INTERNODE_CACHE_SHIFT) 245#define ARCH_MIN_TASKALIGN (1 << INTERNODE_CACHE_SHIFT)
diff --git a/include/asm-x86_64/spinlock.h b/include/asm-x86_64/spinlock.h
index 8d3421996f9..248a79f0eaf 100644
--- a/include/asm-x86_64/spinlock.h
+++ b/include/asm-x86_64/spinlock.h
@@ -21,7 +21,7 @@
21 21
22#define __raw_spin_lock_string \ 22#define __raw_spin_lock_string \
23 "\n1:\t" \ 23 "\n1:\t" \
24 "lock ; decl %0\n\t" \ 24 LOCK_PREFIX " ; decl %0\n\t" \
25 "js 2f\n" \ 25 "js 2f\n" \
26 LOCK_SECTION_START("") \ 26 LOCK_SECTION_START("") \
27 "2:\t" \ 27 "2:\t" \
@@ -40,10 +40,7 @@
40 40
41static inline void __raw_spin_lock(raw_spinlock_t *lock) 41static inline void __raw_spin_lock(raw_spinlock_t *lock)
42{ 42{
43 alternative_smp( 43 asm volatile(__raw_spin_lock_string : "=m" (lock->slock) : : "memory");
44 __raw_spin_lock_string,
45 __raw_spin_lock_string_up,
46 "=m" (lock->slock) : : "memory");
47} 44}
48 45
49#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) 46#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
@@ -125,12 +122,12 @@ static inline int __raw_write_trylock(raw_rwlock_t *lock)
125 122
126static inline void __raw_read_unlock(raw_rwlock_t *rw) 123static inline void __raw_read_unlock(raw_rwlock_t *rw)
127{ 124{
128 asm volatile("lock ; incl %0" :"=m" (rw->lock) : : "memory"); 125 asm volatile(LOCK_PREFIX " ; incl %0" :"=m" (rw->lock) : : "memory");
129} 126}
130 127
131static inline void __raw_write_unlock(raw_rwlock_t *rw) 128static inline void __raw_write_unlock(raw_rwlock_t *rw)
132{ 129{
133 asm volatile("lock ; addl $" RW_LOCK_BIAS_STR ",%0" 130 asm volatile(LOCK_PREFIX " ; addl $" RW_LOCK_BIAS_STR ",%0"
134 : "=m" (rw->lock) : : "memory"); 131 : "=m" (rw->lock) : : "memory");
135} 132}
136 133
diff --git a/include/asm-x86_64/unistd.h b/include/asm-x86_64/unistd.h
index 94387c915e5..2d89d309a2a 100644
--- a/include/asm-x86_64/unistd.h
+++ b/include/asm-x86_64/unistd.h
@@ -620,8 +620,6 @@ __SYSCALL(__NR_vmsplice, sys_vmsplice)
620#define __NR_move_pages 279 620#define __NR_move_pages 279
621__SYSCALL(__NR_move_pages, sys_move_pages) 621__SYSCALL(__NR_move_pages, sys_move_pages)
622 622
623#ifdef __KERNEL__
624
625#define __NR_syscall_max __NR_move_pages 623#define __NR_syscall_max __NR_move_pages
626 624
627#ifndef __NO_STUBS 625#ifndef __NO_STUBS
@@ -746,6 +744,8 @@ __syscall_return(type,__res); \
746 744
747#else /* __KERNEL_SYSCALLS__ */ 745#else /* __KERNEL_SYSCALLS__ */
748 746
747#ifdef __KERNEL__
748
749#include <linux/syscalls.h> 749#include <linux/syscalls.h>
750#include <asm/ptrace.h> 750#include <asm/ptrace.h>
751 751
@@ -838,9 +838,9 @@ asmlinkage long sys_rt_sigaction(int sig,
838 struct sigaction __user *oact, 838 struct sigaction __user *oact,
839 size_t sigsetsize); 839 size_t sigsetsize);
840 840
841#endif /* __ASSEMBLY__ */ 841#endif
842 842
843#endif /* __NO_STUBS */ 843#endif
844 844
845/* 845/*
846 * "Conditional" syscalls 846 * "Conditional" syscalls
@@ -850,5 +850,6 @@ asmlinkage long sys_rt_sigaction(int sig,
850 */ 850 */
851#define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall") 851#define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall")
852 852
853#endif /* __KERNEL__ */ 853#endif
854
854#endif 855#endif
diff --git a/include/asm-x86_64/unwind.h b/include/asm-x86_64/unwind.h
index f3e7124effe..1f6e9bfb569 100644
--- a/include/asm-x86_64/unwind.h
+++ b/include/asm-x86_64/unwind.h
@@ -95,6 +95,7 @@ static inline int arch_unw_user_mode(const struct unwind_frame_info *info)
95#else 95#else
96 96
97#define UNW_PC(frame) ((void)(frame), 0) 97#define UNW_PC(frame) ((void)(frame), 0)
98#define UNW_SP(frame) ((void)(frame), 0)
98 99
99static inline int arch_unw_user_mode(const void *info) 100static inline int arch_unw_user_mode(const void *info)
100{ 101{
diff --git a/include/linux/compat_ioctl.h b/include/linux/compat_ioctl.h
index 269d000bb2a..bea0255196c 100644
--- a/include/linux/compat_ioctl.h
+++ b/include/linux/compat_ioctl.h
@@ -216,6 +216,7 @@ COMPATIBLE_IOCTL(VT_RESIZE)
216COMPATIBLE_IOCTL(VT_RESIZEX) 216COMPATIBLE_IOCTL(VT_RESIZEX)
217COMPATIBLE_IOCTL(VT_LOCKSWITCH) 217COMPATIBLE_IOCTL(VT_LOCKSWITCH)
218COMPATIBLE_IOCTL(VT_UNLOCKSWITCH) 218COMPATIBLE_IOCTL(VT_UNLOCKSWITCH)
219COMPATIBLE_IOCTL(VT_GETHIFONTMASK)
219/* Little p (/dev/rtc, /dev/envctrl, etc.) */ 220/* Little p (/dev/rtc, /dev/envctrl, etc.) */
220COMPATIBLE_IOCTL(RTC_AIE_ON) 221COMPATIBLE_IOCTL(RTC_AIE_ON)
221COMPATIBLE_IOCTL(RTC_AIE_OFF) 222COMPATIBLE_IOCTL(RTC_AIE_OFF)
diff --git a/include/linux/fb.h b/include/linux/fb.h
index 4ad0673b199..2f335e96601 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -1,7 +1,6 @@
1#ifndef _LINUX_FB_H 1#ifndef _LINUX_FB_H
2#define _LINUX_FB_H 2#define _LINUX_FB_H
3 3
4#include <linux/backlight.h>
5#include <asm/types.h> 4#include <asm/types.h>
6 5
7/* Definitions of frame buffers */ 6/* Definitions of frame buffers */
@@ -381,6 +380,7 @@ struct fb_cursor {
381#include <linux/workqueue.h> 380#include <linux/workqueue.h>
382#include <linux/notifier.h> 381#include <linux/notifier.h>
383#include <linux/list.h> 382#include <linux/list.h>
383#include <linux/backlight.h>
384#include <asm/io.h> 384#include <asm/io.h>
385 385
386struct vm_area_struct; 386struct vm_area_struct;
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 25610205c90..555bc195c42 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -570,13 +570,14 @@ struct inode {
570 * 3: quota file 570 * 3: quota file
571 * 571 *
572 * The locking order between these classes is 572 * The locking order between these classes is
573 * parent -> child -> normal -> quota 573 * parent -> child -> normal -> xattr -> quota
574 */ 574 */
575enum inode_i_mutex_lock_class 575enum inode_i_mutex_lock_class
576{ 576{
577 I_MUTEX_NORMAL, 577 I_MUTEX_NORMAL,
578 I_MUTEX_PARENT, 578 I_MUTEX_PARENT,
579 I_MUTEX_CHILD, 579 I_MUTEX_CHILD,
580 I_MUTEX_XATTR,
580 I_MUTEX_QUOTA 581 I_MUTEX_QUOTA
581}; 582};
582 583
diff --git a/include/linux/fs_enet_pd.h b/include/linux/fs_enet_pd.h
index 783c476b867..74ed35a00a9 100644
--- a/include/linux/fs_enet_pd.h
+++ b/include/linux/fs_enet_pd.h
@@ -69,34 +69,21 @@ enum fs_ioport {
69 fsiop_porte, 69 fsiop_porte,
70}; 70};
71 71
72struct fs_mii_bus_info { 72struct fs_mii_bit {
73 int method; /* mii method */ 73 u32 offset;
74 int id; /* the id of the mii_bus */ 74 u8 bit;
75 int disable_aneg; /* if the controller needs to negothiate speed & duplex */ 75 u8 polarity;
76 int lpa; /* the default board-specific vallues will be applied otherwise */ 76};
77 77struct fs_mii_bb_platform_info {
78 union { 78 struct fs_mii_bit mdio_dir;
79 struct { 79 struct fs_mii_bit mdio_dat;
80 int duplex; 80 struct fs_mii_bit mdc_dat;
81 int speed; 81 int mdio_port; /* port & bit for MDIO */
82 } fixed; 82 int mdio_bit;
83 83 int mdc_port; /* port & bit for MDC */
84 struct { 84 int mdc_bit;
85 /* nothing */ 85 int delay; /* delay in us */
86 } fec; 86 int irq[32]; /* irqs per phy's */
87
88 struct {
89 /* nothing */
90 } scc;
91
92 struct {
93 int mdio_port; /* port & bit for MDIO */
94 int mdio_bit;
95 int mdc_port; /* port & bit for MDC */
96 int mdc_bit;
97 int delay; /* delay in us */
98 } bitbang;
99 } i;
100}; 87};
101 88
102struct fs_platform_info { 89struct fs_platform_info {
@@ -119,6 +106,7 @@ struct fs_platform_info {
119 u32 device_flags; 106 u32 device_flags;
120 107
121 int phy_addr; /* the phy address (-1 no phy) */ 108 int phy_addr; /* the phy address (-1 no phy) */
109 const char* bus_id;
122 int phy_irq; /* the phy irq (if it exists) */ 110 int phy_irq; /* the phy irq (if it exists) */
123 111
124 const struct fs_mii_bus_info *bus_info; 112 const struct fs_mii_bus_info *bus_info;
@@ -130,6 +118,10 @@ struct fs_platform_info {
130 int napi_weight; /* NAPI weight */ 118 int napi_weight; /* NAPI weight */
131 119
132 int use_rmii; /* use RMII mode */ 120 int use_rmii; /* use RMII mode */
121 int has_phy; /* if the network is phy container as well...*/
122};
123struct fs_mii_fec_platform_info {
124 u32 irq[32];
125 u32 mii_speed;
133}; 126};
134
135#endif 127#endif
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index 383627ad328..ab274083274 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -155,6 +155,11 @@ static inline int __vlan_hwaccel_rx(struct sk_buff *skb,
155{ 155{
156 struct net_device_stats *stats; 156 struct net_device_stats *stats;
157 157
158 if (skb_bond_should_drop(skb)) {
159 dev_kfree_skb_any(skb);
160 return NET_RX_DROP;
161 }
162
158 skb->dev = grp->vlan_devices[vlan_tag & VLAN_VID_MASK]; 163 skb->dev = grp->vlan_devices[vlan_tag & VLAN_VID_MASK];
159 if (skb->dev == NULL) { 164 if (skb->dev == NULL) {
160 dev_kfree_skb_any(skb); 165 dev_kfree_skb_any(skb);
diff --git a/include/linux/ioprio.h b/include/linux/ioprio.h
index 88d5961f7a3..8e2042b9d47 100644
--- a/include/linux/ioprio.h
+++ b/include/linux/ioprio.h
@@ -59,27 +59,6 @@ static inline int task_nice_ioprio(struct task_struct *task)
59/* 59/*
60 * For inheritance, return the highest of the two given priorities 60 * For inheritance, return the highest of the two given priorities
61 */ 61 */
62static inline int ioprio_best(unsigned short aprio, unsigned short bprio) 62extern int ioprio_best(unsigned short aprio, unsigned short bprio);
63{
64 unsigned short aclass = IOPRIO_PRIO_CLASS(aprio);
65 unsigned short bclass = IOPRIO_PRIO_CLASS(bprio);
66
67 if (!ioprio_valid(aprio))
68 return bprio;
69 if (!ioprio_valid(bprio))
70 return aprio;
71
72 if (aclass == IOPRIO_CLASS_NONE)
73 aclass = IOPRIO_CLASS_BE;
74 if (bclass == IOPRIO_CLASS_NONE)
75 bclass = IOPRIO_CLASS_BE;
76
77 if (aclass == bclass)
78 return min(aprio, bprio);
79 if (aclass > bclass)
80 return bprio;
81 else
82 return aprio;
83}
84 63
85#endif 64#endif
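
The hunk above turns ioprio_best() from a static inline into an extern declaration, so the decision logic moves out of line (presumably into a .c file elsewhere in the tree). Below is a standalone sketch, not the actual kernel source, that reproduces the removed inline body; the IOPRIO_CLASS_SHIFT value and class constants follow the usual linux/ioprio.h encoding and are included here only so the sketch compiles on its own.

/* Standalone sketch of the ioprio_best() decision tree shown in the removed lines. */
#include <stdio.h>

#define IOPRIO_CLASS_SHIFT       13
#define IOPRIO_PRIO_CLASS(mask)  ((mask) >> IOPRIO_CLASS_SHIFT)
#define IOPRIO_PRIO_VALUE(c, d)  (((c) << IOPRIO_CLASS_SHIFT) | (d))

enum { IOPRIO_CLASS_NONE, IOPRIO_CLASS_RT, IOPRIO_CLASS_BE, IOPRIO_CLASS_IDLE };

#define ioprio_valid(mask)       (IOPRIO_PRIO_CLASS(mask) != IOPRIO_CLASS_NONE)

int ioprio_best(unsigned short aprio, unsigned short bprio)
{
        unsigned short aclass = IOPRIO_PRIO_CLASS(aprio);
        unsigned short bclass = IOPRIO_PRIO_CLASS(bprio);

        if (!ioprio_valid(aprio))
                return bprio;
        if (!ioprio_valid(bprio))
                return aprio;

        if (aclass == IOPRIO_CLASS_NONE)
                aclass = IOPRIO_CLASS_BE;
        if (bclass == IOPRIO_CLASS_NONE)
                bclass = IOPRIO_CLASS_BE;

        if (aclass == bclass)
                return aprio < bprio ? aprio : bprio;   /* min(), as in the old inline */
        return aclass > bclass ? bprio : aprio;         /* lower class number wins */
}

int main(void)
{
        unsigned short rt4 = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_RT, 4);
        unsigned short be2 = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 2);

        /* RT beats BE, so the RT value is returned. */
        printf("best = 0x%x (RT value is 0x%x)\n",
               (unsigned)ioprio_best(rt4, be2), (unsigned)rt4);
        return 0;
}
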
diff --git a/include/linux/jbd.h b/include/linux/jbd.h
index 20eb34403d0..a04c154c520 100644
--- a/include/linux/jbd.h
+++ b/include/linux/jbd.h
@@ -72,6 +72,9 @@ extern int journal_enable_debug;
72#endif 72#endif
73 73
74extern void * __jbd_kmalloc (const char *where, size_t size, gfp_t flags, int retry); 74extern void * __jbd_kmalloc (const char *where, size_t size, gfp_t flags, int retry);
75extern void * jbd_slab_alloc(size_t size, gfp_t flags);
76extern void jbd_slab_free(void *ptr, size_t size);
77
75#define jbd_kmalloc(size, flags) \ 78#define jbd_kmalloc(size, flags) \
76 __jbd_kmalloc(__FUNCTION__, (size), (flags), journal_oom_retry) 79 __jbd_kmalloc(__FUNCTION__, (size), (flags), journal_oom_retry)
77#define jbd_rep_kmalloc(size, flags) \ 80#define jbd_rep_kmalloc(size, flags) \
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 181c69cad4e..851aa1bcfc1 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -210,6 +210,7 @@ extern enum system_states {
210extern void dump_stack(void); 210extern void dump_stack(void);
211 211
212#ifdef DEBUG 212#ifdef DEBUG
213/* If you are writing a driver, please use dev_dbg instead */
213#define pr_debug(fmt,arg...) \ 214#define pr_debug(fmt,arg...) \
214 printk(KERN_DEBUG fmt,##arg) 215 printk(KERN_DEBUG fmt,##arg)
215#else 216#else
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 990957e0929..f0b135cd86d 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -336,6 +336,7 @@ static inline void init_page_count(struct page *page)
336} 336}
337 337
338void put_page(struct page *page); 338void put_page(struct page *page);
339void put_pages_list(struct list_head *pages);
339 340
340void split_page(struct page *page, unsigned int order); 341void split_page(struct page *page, unsigned int order);
341 342
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 75f02d8c6ed..50a4719512e 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -320,6 +320,9 @@ struct net_device
320#define NETIF_F_TSO_ECN (SKB_GSO_TCP_ECN << NETIF_F_GSO_SHIFT) 320#define NETIF_F_TSO_ECN (SKB_GSO_TCP_ECN << NETIF_F_GSO_SHIFT)
321#define NETIF_F_TSO6 (SKB_GSO_TCPV6 << NETIF_F_GSO_SHIFT) 321#define NETIF_F_TSO6 (SKB_GSO_TCPV6 << NETIF_F_GSO_SHIFT)
322 322
323 /* List of features with software fallbacks. */
324#define NETIF_F_GSO_SOFTWARE (NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6)
325
323#define NETIF_F_GEN_CSUM (NETIF_F_NO_CSUM | NETIF_F_HW_CSUM) 326#define NETIF_F_GEN_CSUM (NETIF_F_NO_CSUM | NETIF_F_HW_CSUM)
324#define NETIF_F_ALL_CSUM (NETIF_F_IP_CSUM | NETIF_F_GEN_CSUM) 327#define NETIF_F_ALL_CSUM (NETIF_F_IP_CSUM | NETIF_F_GEN_CSUM)
325 328
@@ -1012,6 +1015,30 @@ static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb)
1012 unlikely(skb->ip_summed != CHECKSUM_HW)); 1015 unlikely(skb->ip_summed != CHECKSUM_HW));
1013} 1016}
1014 1017
1018/* On bonding slaves other than the currently active slave, suppress
1019 * duplicates except for 802.3ad ETH_P_SLOW and alb non-mcast/bcast.
1020 */
1021static inline int skb_bond_should_drop(struct sk_buff *skb)
1022{
1023 struct net_device *dev = skb->dev;
1024 struct net_device *master = dev->master;
1025
1026 if (master &&
1027 (dev->priv_flags & IFF_SLAVE_INACTIVE)) {
1028 if (master->priv_flags & IFF_MASTER_ALB) {
1029 if (skb->pkt_type != PACKET_BROADCAST &&
1030 skb->pkt_type != PACKET_MULTICAST)
1031 return 0;
1032 }
1033 if (master->priv_flags & IFF_MASTER_8023AD &&
1034 skb->protocol == __constant_htons(ETH_P_SLOW))
1035 return 0;
1036
1037 return 1;
1038 }
1039 return 0;
1040}
1041
1015#endif /* __KERNEL__ */ 1042#endif /* __KERNEL__ */
1016 1043
1017#endif /* _LINUX_DEV_H */ 1044#endif /* _LINUX_DEV_H */
diff --git a/include/linux/netfilter_bridge.h b/include/linux/netfilter_bridge.h
index 10c13dc4665..427c67ff89e 100644
--- a/include/linux/netfilter_bridge.h
+++ b/include/linux/netfilter_bridge.h
@@ -48,15 +48,25 @@ enum nf_br_hook_priorities {
48 48
49/* Only used in br_forward.c */ 49/* Only used in br_forward.c */
50static inline 50static inline
51void nf_bridge_maybe_copy_header(struct sk_buff *skb) 51int nf_bridge_maybe_copy_header(struct sk_buff *skb)
52{ 52{
53 int err;
54
53 if (skb->nf_bridge) { 55 if (skb->nf_bridge) {
54 if (skb->protocol == __constant_htons(ETH_P_8021Q)) { 56 if (skb->protocol == __constant_htons(ETH_P_8021Q)) {
57 err = skb_cow(skb, 18);
58 if (err)
59 return err;
55 memcpy(skb->data - 18, skb->nf_bridge->data, 18); 60 memcpy(skb->data - 18, skb->nf_bridge->data, 18);
56 skb_push(skb, 4); 61 skb_push(skb, 4);
57 } else 62 } else {
63 err = skb_cow(skb, 16);
64 if (err)
65 return err;
58 memcpy(skb->data - 16, skb->nf_bridge->data, 16); 66 memcpy(skb->data - 16, skb->nf_bridge->data, 16);
67 }
59 } 68 }
69 return 0;
60} 70}
61 71
62/* This is called by the IP fragmenting code and it ensures there is 72/* This is called by the IP fragmenting code and it ensures there is
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 2d3fb6416d9..db9cbf68e12 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -659,7 +659,7 @@ struct nfs4_rename_res {
659struct nfs4_setclientid { 659struct nfs4_setclientid {
660 const nfs4_verifier * sc_verifier; /* request */ 660 const nfs4_verifier * sc_verifier; /* request */
661 unsigned int sc_name_len; 661 unsigned int sc_name_len;
662 char sc_name[32]; /* request */ 662 char sc_name[48]; /* request */
663 u32 sc_prog; /* request */ 663 u32 sc_prog; /* request */
664 unsigned int sc_netid_len; 664 unsigned int sc_netid_len;
665 char sc_netid[4]; /* request */ 665 char sc_netid[4]; /* request */
diff --git a/include/linux/node.h b/include/linux/node.h
index 81dcec84cd8..bc001bc225c 100644
--- a/include/linux/node.h
+++ b/include/linux/node.h
@@ -30,12 +30,20 @@ extern struct node node_devices[];
30 30
31extern int register_node(struct node *, int, struct node *); 31extern int register_node(struct node *, int, struct node *);
32extern void unregister_node(struct node *node); 32extern void unregister_node(struct node *node);
33#ifdef CONFIG_NUMA
33extern int register_one_node(int nid); 34extern int register_one_node(int nid);
34extern void unregister_one_node(int nid); 35extern void unregister_one_node(int nid);
35#ifdef CONFIG_NUMA
36extern int register_cpu_under_node(unsigned int cpu, unsigned int nid); 36extern int register_cpu_under_node(unsigned int cpu, unsigned int nid);
37extern int unregister_cpu_under_node(unsigned int cpu, unsigned int nid); 37extern int unregister_cpu_under_node(unsigned int cpu, unsigned int nid);
38#else 38#else
39static inline int register_one_node(int nid)
40{
41 return 0;
42}
43static inline int unregister_one_node(int nid)
44{
45 return 0;
46}
39static inline int register_cpu_under_node(unsigned int cpu, unsigned int nid) 47static inline int register_cpu_under_node(unsigned int cpu, unsigned int nid)
40{ 48{
41 return 0; 49 return 0;
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 4eae06b08cf..4c2839eab7f 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -1726,6 +1726,9 @@
1726#define PCI_VENDOR_ID_DOMEX 0x134a 1726#define PCI_VENDOR_ID_DOMEX 0x134a
1727#define PCI_DEVICE_ID_DOMEX_DMX3191D 0x0001 1727#define PCI_DEVICE_ID_DOMEX_DMX3191D 0x0001
1728 1728
1729#define PCI_VENDOR_ID_INTASHIELD 0x135a
1730#define PCI_DEVICE_ID_INTASHIELD_IS200 0x0d80
1731
1729#define PCI_VENDOR_ID_QUATECH 0x135C 1732#define PCI_VENDOR_ID_QUATECH 0x135C
1730#define PCI_DEVICE_ID_QUATECH_QSC100 0x0010 1733#define PCI_DEVICE_ID_QUATECH_QSC100 0x0010
1731#define PCI_DEVICE_ID_QUATECH_DSC100 0x0020 1734#define PCI_DEVICE_ID_QUATECH_DSC100 0x0020
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 331521a10a2..9447a57ee8a 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -378,6 +378,7 @@ int phy_mii_ioctl(struct phy_device *phydev,
378 struct mii_ioctl_data *mii_data, int cmd); 378 struct mii_ioctl_data *mii_data, int cmd);
379int phy_start_interrupts(struct phy_device *phydev); 379int phy_start_interrupts(struct phy_device *phydev);
380void phy_print_status(struct phy_device *phydev); 380void phy_print_status(struct phy_device *phydev);
381struct phy_device* phy_device_create(struct mii_bus *bus, int addr, int phy_id);
381 382
382extern struct bus_type mdio_bus_type; 383extern struct bus_type mdio_bus_type;
383#endif /* __PHY_H */ 384#endif /* __PHY_H */
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 19c96d498e2..755e9cddac4 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1040,6 +1040,21 @@ static inline int pskb_trim(struct sk_buff *skb, unsigned int len)
1040} 1040}
1041 1041
1042/** 1042/**
1043 * pskb_trim_unique - remove end from a paged unique (not cloned) buffer
1044 * @skb: buffer to alter
1045 * @len: new length
1046 *
1047 * This is identical to pskb_trim except that the caller knows that
1048 * the skb is not cloned so we should never get an error due to out-
1049 * of-memory.
1050 */
1051static inline void pskb_trim_unique(struct sk_buff *skb, unsigned int len)
1052{
1053 int err = pskb_trim(skb, len);
1054 BUG_ON(err);
1055}
1056
1057/**
1043 * skb_orphan - orphan a buffer 1058 * skb_orphan - orphan a buffer
1044 * @skb: buffer to orphan 1059 * @skb: buffer to orphan
1045 * 1060 *
@@ -1081,7 +1096,7 @@ static inline void __skb_queue_purge(struct sk_buff_head *list)
1081 * the headroom they think they need without accounting for the 1096 * the headroom they think they need without accounting for the
1082 * built in space. The built in space is used for optimisations. 1097 * built in space. The built in space is used for optimisations.
1083 * 1098 *
1084 * %NULL is returned in there is no free memory. 1099 * %NULL is returned if there is no free memory.
1085 */ 1100 */
1086static inline struct sk_buff *__dev_alloc_skb(unsigned int length, 1101static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
1087 gfp_t gfp_mask) 1102 gfp_t gfp_mask)
@@ -1101,7 +1116,7 @@ static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
1101 * the headroom they think they need without accounting for the 1116 * the headroom they think they need without accounting for the
1102 * built in space. The built in space is used for optimisations. 1117 * built in space. The built in space is used for optimisations.
1103 * 1118 *
1104 * %NULL is returned in there is no free memory. Although this function 1119 * %NULL is returned if there is no free memory. Although this function
1105 * allocates memory it can be called from an interrupt. 1120 * allocates memory it can be called from an interrupt.
1106 */ 1121 */
1107static inline struct sk_buff *dev_alloc_skb(unsigned int length) 1122static inline struct sk_buff *dev_alloc_skb(unsigned int length)
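
The pskb_trim_unique() helper added above wraps pskb_trim() in BUG_ON() because the caller guarantees the skb is not cloned, so the trim cannot fail. The sketch below mirrors that same wrapper pattern with hypothetical names (struct buf, buf_trim, buf_trim_unique); it is not kernel code, it only shows the contract: when the caller has ruled out the failure mode, the error path becomes an assertion instead of a check at every call site.

/* Standalone illustration of the "caller-guaranteed infallible" wrapper pattern. */
#include <assert.h>
#include <stdio.h>
#include <string.h>

struct buf {
        char   data[64];
        size_t len;
        int    shared;          /* stands in for skb_cloned() */
};

/* May fail (returns -1) when the buffer is shared, like pskb_trim(). */
static int buf_trim(struct buf *b, size_t len)
{
        if (b->shared)
                return -1;      /* would need an allocation that can fail */
        if (len < b->len)
                b->len = len;
        return 0;
}

/* Caller promises the buffer is private, so a failure here is a logic bug. */
static void buf_trim_unique(struct buf *b, size_t len)
{
        int err = buf_trim(b, len);
        assert(!err);           /* the kernel helper uses BUG_ON(err) */
}

int main(void)
{
        struct buf b = { .len = 10, .shared = 0 };
        strcpy(b.data, "abcdefghij");

        buf_trim_unique(&b, 4); /* no error handling needed at the call site */
        printf("trimmed length = %zu\n", b.len);
        return 0;
}
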
diff --git a/include/linux/sunrpc/rpc_pipe_fs.h b/include/linux/sunrpc/rpc_pipe_fs.h
index 2c2189cb30a..a481472c948 100644
--- a/include/linux/sunrpc/rpc_pipe_fs.h
+++ b/include/linux/sunrpc/rpc_pipe_fs.h
@@ -42,9 +42,9 @@ RPC_I(struct inode *inode)
42extern int rpc_queue_upcall(struct inode *, struct rpc_pipe_msg *); 42extern int rpc_queue_upcall(struct inode *, struct rpc_pipe_msg *);
43 43
44extern struct dentry *rpc_mkdir(char *, struct rpc_clnt *); 44extern struct dentry *rpc_mkdir(char *, struct rpc_clnt *);
45extern int rpc_rmdir(char *); 45extern int rpc_rmdir(struct dentry *);
46extern struct dentry *rpc_mkpipe(char *, void *, struct rpc_pipe_ops *, int flags); 46extern struct dentry *rpc_mkpipe(char *, void *, struct rpc_pipe_ops *, int flags);
47extern int rpc_unlink(char *); 47extern int rpc_unlink(struct dentry *);
48extern struct vfsmount *rpc_get_mount(void); 48extern struct vfsmount *rpc_get_mount(void);
49extern void rpc_put_mount(void); 49extern void rpc_put_mount(void);
50 50
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
index 840e47a4ccc..3a0cca255b7 100644
--- a/include/linux/sunrpc/xprt.h
+++ b/include/linux/sunrpc/xprt.h
@@ -37,7 +37,7 @@ extern unsigned int xprt_max_resvport;
37 37
38#define RPC_MIN_RESVPORT (1U) 38#define RPC_MIN_RESVPORT (1U)
39#define RPC_MAX_RESVPORT (65535U) 39#define RPC_MAX_RESVPORT (65535U)
40#define RPC_DEF_MIN_RESVPORT (650U) 40#define RPC_DEF_MIN_RESVPORT (665U)
41#define RPC_DEF_MAX_RESVPORT (1023U) 41#define RPC_DEF_MAX_RESVPORT (1023U)
42 42
43/* 43/*
diff --git a/include/linux/tty.h b/include/linux/tty.h
index e421d5e3481..04827ca6578 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -59,6 +59,7 @@ struct tty_bufhead {
59 struct tty_buffer *head; /* Queue head */ 59 struct tty_buffer *head; /* Queue head */
60 struct tty_buffer *tail; /* Active buffer */ 60 struct tty_buffer *tail; /* Active buffer */
61 struct tty_buffer *free; /* Free queue head */ 61 struct tty_buffer *free; /* Free queue head */
62 int memory_used; /* Buffer space used excluding free queue */
62}; 63};
63/* 64/*
64 * The pty uses char_buf and flag_buf as a contiguous buffer 65 * The pty uses char_buf and flag_buf as a contiguous buffer
diff --git a/include/linux/vt.h b/include/linux/vt.h
index 8ab334a4822..ba806e8711b 100644
--- a/include/linux/vt.h
+++ b/include/linux/vt.h
@@ -60,5 +60,6 @@ struct vt_consize {
60#define VT_RESIZEX 0x560A /* set kernel's idea of screensize + more */ 60#define VT_RESIZEX 0x560A /* set kernel's idea of screensize + more */
61#define VT_LOCKSWITCH 0x560B /* disallow vt switching */ 61#define VT_LOCKSWITCH 0x560B /* disallow vt switching */
62#define VT_UNLOCKSWITCH 0x560C /* allow vt switching */ 62#define VT_UNLOCKSWITCH 0x560C /* allow vt switching */
63#define VT_GETHIFONTMASK 0x560D /* return hi font mask */
63 64
64#endif /* _LINUX_VT_H */ 65#endif /* _LINUX_VT_H */
diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
index f8665326ed9..600d61d7d2a 100644
--- a/include/media/v4l2-dev.h
+++ b/include/media/v4l2-dev.h
@@ -16,7 +16,7 @@
16#include <linux/device.h> 16#include <linux/device.h>
17#include <linux/mutex.h> 17#include <linux/mutex.h>
18#include <linux/compiler.h> /* need __user */ 18#include <linux/compiler.h> /* need __user */
19#ifdef CONFIG_VIDEO_V4L1 19#ifdef CONFIG_VIDEO_V4L1_COMPAT
20#include <linux/videodev.h> 20#include <linux/videodev.h>
21#else 21#else
22#include <linux/videodev2.h> 22#include <linux/videodev2.h>
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index a9663b49ea5..92eae0e0f3f 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -404,19 +404,6 @@ static inline int sctp_list_single_entry(struct list_head *head)
404 return ((head->next != head) && (head->next == head->prev)); 404 return ((head->next != head) && (head->next == head->prev));
405} 405}
406 406
407/* Calculate the size (in bytes) occupied by the data of an iovec. */
408static inline size_t get_user_iov_size(struct iovec *iov, int iovlen)
409{
410 size_t retval = 0;
411
412 for (; iovlen > 0; --iovlen) {
413 retval += iov->iov_len;
414 iov++;
415 }
416
417 return retval;
418}
419
420/* Generate a random jitter in the range of -50% ~ +50% of input RTO. */ 407/* Generate a random jitter in the range of -50% ~ +50% of input RTO. */
421static inline __s32 sctp_jitter(__u32 rto) 408static inline __s32 sctp_jitter(__u32 rto)
422{ 409{
diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h
index 1eac3d0eb7a..de313de4fef 100644
--- a/include/net/sctp/sm.h
+++ b/include/net/sctp/sm.h
@@ -221,8 +221,7 @@ struct sctp_chunk *sctp_make_abort_no_data(const struct sctp_association *,
221 const struct sctp_chunk *, 221 const struct sctp_chunk *,
222 __u32 tsn); 222 __u32 tsn);
223struct sctp_chunk *sctp_make_abort_user(const struct sctp_association *, 223struct sctp_chunk *sctp_make_abort_user(const struct sctp_association *,
224 const struct sctp_chunk *, 224 const struct msghdr *, size_t msg_len);
225 const struct msghdr *);
226struct sctp_chunk *sctp_make_abort_violation(const struct sctp_association *, 225struct sctp_chunk *sctp_make_abort_violation(const struct sctp_association *,
227 const struct sctp_chunk *, 226 const struct sctp_chunk *,
228 const __u8 *, 227 const __u8 *,
diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h
index ba2760802de..41904f611d1 100644
--- a/include/scsi/libiscsi.h
+++ b/include/scsi/libiscsi.h
@@ -60,6 +60,7 @@ struct iscsi_nopin;
60#define TMABORT_SUCCESS 0x1 60#define TMABORT_SUCCESS 0x1
61#define TMABORT_FAILED 0x2 61#define TMABORT_FAILED 0x2
62#define TMABORT_TIMEDOUT 0x3 62#define TMABORT_TIMEDOUT 0x3
63#define TMABORT_NOT_FOUND 0x4
63 64
64/* Connection suspend "bit" */ 65/* Connection suspend "bit" */
65#define ISCSI_SUSPEND_BIT 1 66#define ISCSI_SUSPEND_BIT 1
@@ -83,6 +84,12 @@ struct iscsi_mgmt_task {
83 struct list_head running; 84 struct list_head running;
84}; 85};
85 86
87enum {
88 ISCSI_TASK_COMPLETED,
89 ISCSI_TASK_PENDING,
90 ISCSI_TASK_RUNNING,
91};
92
86struct iscsi_cmd_task { 93struct iscsi_cmd_task {
87 /* 94 /*
 88 * Because LLDs allocate their hdr differently, this is a pointer to 95 * Because LLDs allocate their hdr differently, this is a pointer to
@@ -101,6 +108,8 @@ struct iscsi_cmd_task {
101 struct iscsi_conn *conn; /* used connection */ 108 struct iscsi_conn *conn; /* used connection */
102 struct iscsi_mgmt_task *mtask; /* tmf mtask in progr */ 109 struct iscsi_mgmt_task *mtask; /* tmf mtask in progr */
103 110
111 /* state set/tested under session->lock */
112 int state;
104 struct list_head running; /* running cmd list */ 113 struct list_head running; /* running cmd list */
105 void *dd_data; /* driver/transport data */ 114 void *dd_data; /* driver/transport data */
106}; 115};
@@ -126,6 +135,14 @@ struct iscsi_conn {
126 int id; /* CID */ 135 int id; /* CID */
127 struct list_head item; /* maintains list of conns */ 136 struct list_head item; /* maintains list of conns */
128 int c_stage; /* connection state */ 137 int c_stage; /* connection state */
138 /*
139 * Preallocated buffer for pdus that have data but do not
140 * originate from scsi-ml. We never have two pdus using the
 141 * buffer at the same time. It is only allocated at
142 * the default max recv size because the pdus we support
143 * should always fit in this buffer
144 */
145 char *data;
129 struct iscsi_mgmt_task *login_mtask; /* mtask used for login/text */ 146 struct iscsi_mgmt_task *login_mtask; /* mtask used for login/text */
130 struct iscsi_mgmt_task *mtask; /* xmit mtask in progress */ 147 struct iscsi_mgmt_task *mtask; /* xmit mtask in progress */
131 struct iscsi_cmd_task *ctask; /* xmit ctask in progress */ 148 struct iscsi_cmd_task *ctask; /* xmit ctask in progress */
@@ -134,7 +151,7 @@ struct iscsi_conn {
134 struct kfifo *immqueue; /* immediate xmit queue */ 151 struct kfifo *immqueue; /* immediate xmit queue */
135 struct kfifo *mgmtqueue; /* mgmt (control) xmit queue */ 152 struct kfifo *mgmtqueue; /* mgmt (control) xmit queue */
136 struct list_head mgmt_run_list; /* list of control tasks */ 153 struct list_head mgmt_run_list; /* list of control tasks */
137 struct kfifo *xmitqueue; /* data-path cmd queue */ 154 struct list_head xmitqueue; /* data-path cmd queue */
138 struct list_head run_list; /* list of cmds in progress */ 155 struct list_head run_list; /* list of cmds in progress */
139 struct work_struct xmitwork; /* per-conn. xmit workqueue */ 156 struct work_struct xmitwork; /* per-conn. xmit workqueue */
140 /* 157 /*
diff --git a/include/scsi/scsi_transport_iscsi.h b/include/scsi/scsi_transport_iscsi.h
index 5a3df1d7085..39e833260bd 100644
--- a/include/scsi/scsi_transport_iscsi.h
+++ b/include/scsi/scsi_transport_iscsi.h
@@ -57,8 +57,6 @@ struct sockaddr;
57 * @stop_conn: suspend/recover/terminate connection 57 * @stop_conn: suspend/recover/terminate connection
58 * @send_pdu: send iSCSI PDU, Login, Logout, NOP-Out, Reject, Text. 58 * @send_pdu: send iSCSI PDU, Login, Logout, NOP-Out, Reject, Text.
59 * @session_recovery_timedout: notify LLD a block during recovery timed out 59 * @session_recovery_timedout: notify LLD a block during recovery timed out
60 * @suspend_conn_recv: susepend the recv side of the connection
61 * @termincate_conn: destroy socket connection. Called with mutex lock.
62 * @init_cmd_task: Initialize a iscsi_cmd_task and any internal structs. 60 * @init_cmd_task: Initialize a iscsi_cmd_task and any internal structs.
63 * Called from queuecommand with session lock held. 61 * Called from queuecommand with session lock held.
64 * @init_mgmt_task: Initialize a iscsi_mgmt_task and any internal structs. 62 * @init_mgmt_task: Initialize a iscsi_mgmt_task and any internal structs.
@@ -112,8 +110,6 @@ struct iscsi_transport {
112 char *data, uint32_t data_size); 110 char *data, uint32_t data_size);
113 void (*get_stats) (struct iscsi_cls_conn *conn, 111 void (*get_stats) (struct iscsi_cls_conn *conn,
114 struct iscsi_stats *stats); 112 struct iscsi_stats *stats);
115 void (*suspend_conn_recv) (struct iscsi_conn *conn);
116 void (*terminate_conn) (struct iscsi_conn *conn);
117 void (*init_cmd_task) (struct iscsi_cmd_task *ctask); 113 void (*init_cmd_task) (struct iscsi_cmd_task *ctask);
118 void (*init_mgmt_task) (struct iscsi_conn *conn, 114 void (*init_mgmt_task) (struct iscsi_conn *conn,
119 struct iscsi_mgmt_task *mtask, 115 struct iscsi_mgmt_task *mtask,
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 1a649f2bb9b..4ea6f0dc2fc 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -816,6 +816,10 @@ static int update_cpumask(struct cpuset *cs, char *buf)
816 struct cpuset trialcs; 816 struct cpuset trialcs;
817 int retval, cpus_unchanged; 817 int retval, cpus_unchanged;
818 818
819 /* top_cpuset.cpus_allowed tracks cpu_online_map; it's read-only */
820 if (cs == &top_cpuset)
821 return -EACCES;
822
819 trialcs = *cs; 823 trialcs = *cs;
820 retval = cpulist_parse(buf, trialcs.cpus_allowed); 824 retval = cpulist_parse(buf, trialcs.cpus_allowed);
821 if (retval < 0) 825 if (retval < 0)
@@ -2033,6 +2037,33 @@ out:
2033 return err; 2037 return err;
2034} 2038}
2035 2039
2040/*
2041 * The top_cpuset tracks what CPUs and Memory Nodes are online,
2042 * period. This is necessary in order to make cpusets transparent
 2043 * (of no effect) on systems that are actively using CPU hotplug
2044 * but making no active use of cpusets.
2045 *
2046 * This handles CPU hotplug (cpuhp) events. If someday Memory
2047 * Nodes can be hotplugged (dynamically changing node_online_map)
2048 * then we should handle that too, perhaps in a similar way.
2049 */
2050
2051#ifdef CONFIG_HOTPLUG_CPU
2052static int cpuset_handle_cpuhp(struct notifier_block *nb,
2053 unsigned long phase, void *cpu)
2054{
2055 mutex_lock(&manage_mutex);
2056 mutex_lock(&callback_mutex);
2057
2058 top_cpuset.cpus_allowed = cpu_online_map;
2059
2060 mutex_unlock(&callback_mutex);
2061 mutex_unlock(&manage_mutex);
2062
2063 return 0;
2064}
2065#endif
2066
2036/** 2067/**
2037 * cpuset_init_smp - initialize cpus_allowed 2068 * cpuset_init_smp - initialize cpus_allowed
2038 * 2069 *
@@ -2043,6 +2074,8 @@ void __init cpuset_init_smp(void)
2043{ 2074{
2044 top_cpuset.cpus_allowed = cpu_online_map; 2075 top_cpuset.cpus_allowed = cpu_online_map;
2045 top_cpuset.mems_allowed = node_online_map; 2076 top_cpuset.mems_allowed = node_online_map;
2077
2078 hotcpu_notifier(cpuset_handle_cpuhp, 0);
2046} 2079}
2047 2080
2048/** 2081/**
@@ -2387,7 +2420,7 @@ EXPORT_SYMBOL_GPL(cpuset_mem_spread_node);
2387int cpuset_excl_nodes_overlap(const struct task_struct *p) 2420int cpuset_excl_nodes_overlap(const struct task_struct *p)
2388{ 2421{
2389 const struct cpuset *cs1, *cs2; /* my and p's cpuset ancestors */ 2422 const struct cpuset *cs1, *cs2; /* my and p's cpuset ancestors */
2390 int overlap = 0; /* do cpusets overlap? */ 2423 int overlap = 1; /* do cpusets overlap? */
2391 2424
2392 task_lock(current); 2425 task_lock(current);
2393 if (current->flags & PF_EXITING) { 2426 if (current->flags & PF_EXITING) {
diff --git a/kernel/futex.c b/kernel/futex.c
index c2b2e0b83ab..b9b8aea5389 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -297,7 +297,7 @@ static int futex_handle_fault(unsigned long address, int attempt)
297 struct vm_area_struct * vma; 297 struct vm_area_struct * vma;
298 struct mm_struct *mm = current->mm; 298 struct mm_struct *mm = current->mm;
299 299
300 if (attempt >= 2 || !(vma = find_vma(mm, address)) || 300 if (attempt > 2 || !(vma = find_vma(mm, address)) ||
301 vma->vm_start > address || !(vma->vm_flags & VM_WRITE)) 301 vma->vm_start > address || !(vma->vm_flags & VM_WRITE))
302 return -EFAULT; 302 return -EFAULT;
303 303
@@ -397,7 +397,7 @@ static struct task_struct * futex_find_get_task(pid_t pid)
397 p = NULL; 397 p = NULL;
398 goto out_unlock; 398 goto out_unlock;
399 } 399 }
400 if (p->state == EXIT_ZOMBIE || p->exit_state == EXIT_ZOMBIE) { 400 if (p->exit_state != 0) {
401 p = NULL; 401 p = NULL;
402 goto out_unlock; 402 goto out_unlock;
403 } 403 }
@@ -747,8 +747,10 @@ retry:
747 */ 747 */
748 if (attempt++) { 748 if (attempt++) {
749 if (futex_handle_fault((unsigned long)uaddr2, 749 if (futex_handle_fault((unsigned long)uaddr2,
750 attempt)) 750 attempt)) {
751 ret = -EFAULT;
751 goto out; 752 goto out;
753 }
752 goto retry; 754 goto retry;
753 } 755 }
754 756
@@ -1322,9 +1324,10 @@ static int do_futex_lock_pi(u32 __user *uaddr, int detect, int trylock,
1322 * still holding the mmap_sem. 1324 * still holding the mmap_sem.
1323 */ 1325 */
1324 if (attempt++) { 1326 if (attempt++) {
1325 if (futex_handle_fault((unsigned long)uaddr, attempt)) 1327 if (futex_handle_fault((unsigned long)uaddr, attempt)) {
1328 ret = -EFAULT;
1326 goto out_unlock_release_sem; 1329 goto out_unlock_release_sem;
1327 1330 }
1328 goto retry_locked; 1331 goto retry_locked;
1329 } 1332 }
1330 1333
@@ -1506,9 +1509,10 @@ pi_faulted:
1506 * still holding the mmap_sem. 1509 * still holding the mmap_sem.
1507 */ 1510 */
1508 if (attempt++) { 1511 if (attempt++) {
1509 if (futex_handle_fault((unsigned long)uaddr, attempt)) 1512 if (futex_handle_fault((unsigned long)uaddr, attempt)) {
1513 ret = -EFAULT;
1510 goto out_unlock; 1514 goto out_unlock;
1511 1515 }
1512 goto retry_locked; 1516 goto retry_locked;
1513 } 1517 }
1514 1518
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index be989efc785..21c38a7e666 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -187,7 +187,7 @@ switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_base *base)
187{ 187{
188 struct hrtimer_base *new_base; 188 struct hrtimer_base *new_base;
189 189
190 new_base = &__get_cpu_var(hrtimer_bases[base->index]); 190 new_base = &__get_cpu_var(hrtimer_bases)[base->index];
191 191
192 if (base != new_base) { 192 if (base != new_base) {
193 /* 193 /*
diff --git a/kernel/panic.c b/kernel/panic.c
index d8a0bca2123..9b8dcfd1ca9 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -18,6 +18,7 @@
18#include <linux/interrupt.h> 18#include <linux/interrupt.h>
19#include <linux/nmi.h> 19#include <linux/nmi.h>
20#include <linux/kexec.h> 20#include <linux/kexec.h>
21#include <linux/debug_locks.h>
21 22
22int panic_on_oops; 23int panic_on_oops;
23int tainted; 24int tainted;
diff --git a/kernel/sched.c b/kernel/sched.c
index a2be2d05529..a234fbee123 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4162,10 +4162,8 @@ do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
4162 read_unlock_irq(&tasklist_lock); 4162 read_unlock_irq(&tasklist_lock);
4163 return -ESRCH; 4163 return -ESRCH;
4164 } 4164 }
4165 get_task_struct(p);
4166 read_unlock_irq(&tasklist_lock);
4167 retval = sched_setscheduler(p, policy, &lparam); 4165 retval = sched_setscheduler(p, policy, &lparam);
4168 put_task_struct(p); 4166 read_unlock_irq(&tasklist_lock);
4169 4167
4170 return retval; 4168 return retval;
4171} 4169}
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index dcfb5d73146..51cacd111db 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -111,7 +111,6 @@ static int stop_machine(void)
111 /* If some failed, kill them all. */ 111 /* If some failed, kill them all. */
112 if (ret < 0) { 112 if (ret < 0) {
113 stopmachine_set_state(STOPMACHINE_EXIT); 113 stopmachine_set_state(STOPMACHINE_EXIT);
114 up(&stopmachine_mutex);
115 return ret; 114 return ret;
116 } 115 }
117 116
diff --git a/kernel/timer.c b/kernel/timer.c
index b650f04888e..1d7dd6267c2 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -1324,46 +1324,19 @@ asmlinkage long sys_getpid(void)
1324} 1324}
1325 1325
1326/* 1326/*
1327 * Accessing ->group_leader->real_parent is not SMP-safe, it could 1327 * Accessing ->real_parent is not SMP-safe, it could
1328 * change from under us. However, rather than getting any lock 1328 * change from under us. However, we can use a stale
1329 * we can use an optimistic algorithm: get the parent 1329 * value of ->real_parent under rcu_read_lock(), see
1330 * pid, and go back and check that the parent is still 1330 * release_task()->call_rcu(delayed_put_task_struct).
1331 * the same. If it has changed (which is extremely unlikely
1332 * indeed), we just try again..
1333 *
1334 * NOTE! This depends on the fact that even if we _do_
1335 * get an old value of "parent", we can happily dereference
1336 * the pointer (it was and remains a dereferencable kernel pointer
1337 * no matter what): we just can't necessarily trust the result
1338 * until we know that the parent pointer is valid.
1339 *
1340 * NOTE2: ->group_leader never changes from under us.
1341 */ 1331 */
1342asmlinkage long sys_getppid(void) 1332asmlinkage long sys_getppid(void)
1343{ 1333{
1344 int pid; 1334 int pid;
1345 struct task_struct *me = current;
1346 struct task_struct *parent;
1347 1335
1348 parent = me->group_leader->real_parent; 1336 rcu_read_lock();
1349 for (;;) { 1337 pid = rcu_dereference(current->real_parent)->tgid;
1350 pid = parent->tgid; 1338 rcu_read_unlock();
1351#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
1352{
1353 struct task_struct *old = parent;
1354 1339
1355 /*
1356 * Make sure we read the pid before re-reading the
1357 * parent pointer:
1358 */
1359 smp_rmb();
1360 parent = me->group_leader->real_parent;
1361 if (old != parent)
1362 continue;
1363}
1364#endif
1365 break;
1366 }
1367 return pid; 1340 return pid;
1368} 1341}
1369 1342
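Note: the sys_getppid() hunk above replaces the optimistic read-and-recheck loop with a single read inside rcu_read_lock()/rcu_read_unlock(). Below is a minimal userspace analogue of that shape, assuming a pthread rwlock as a stand-in for the RCU read side and an invented task struct; it is a sketch of the pattern, not the kernel code.

/* Userspace analogue of the sys_getppid() simplification: one short
 * read-side critical section instead of an smp_rmb()-based retry loop.
 * The rwlock, task struct and main() are illustrative assumptions. */
#include <pthread.h>
#include <stdio.h>

struct task {
	int tgid;
};

static struct task init_task = { .tgid = 1 };
static struct task *real_parent = &init_task;
static pthread_rwlock_t parent_lock = PTHREAD_RWLOCK_INITIALIZER;

static int getppid_sketch(void)
{
	int pid;

	/* single consistent read under the read-side lock */
	pthread_rwlock_rdlock(&parent_lock);
	pid = real_parent->tgid;
	pthread_rwlock_unlock(&parent_lock);

	return pid;
}

int main(void)
{
	printf("ppid = %d\n", getppid_sketch());
	return 0;
}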
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 448e8f7b342..835fe28b87a 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -68,7 +68,7 @@ struct workqueue_struct {
68 68
69/* All the per-cpu workqueues on the system, for hotplug cpu to add/remove 69/* All the per-cpu workqueues on the system, for hotplug cpu to add/remove
70 threads to each one as cpus come/go. */ 70 threads to each one as cpus come/go. */
71static DEFINE_SPINLOCK(workqueue_lock); 71static DEFINE_MUTEX(workqueue_mutex);
72static LIST_HEAD(workqueues); 72static LIST_HEAD(workqueues);
73 73
74static int singlethread_cpu; 74static int singlethread_cpu;
@@ -320,10 +320,10 @@ void fastcall flush_workqueue(struct workqueue_struct *wq)
320 } else { 320 } else {
321 int cpu; 321 int cpu;
322 322
323 lock_cpu_hotplug(); 323 mutex_lock(&workqueue_mutex);
324 for_each_online_cpu(cpu) 324 for_each_online_cpu(cpu)
325 flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu)); 325 flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
326 unlock_cpu_hotplug(); 326 mutex_unlock(&workqueue_mutex);
327 } 327 }
328} 328}
329EXPORT_SYMBOL_GPL(flush_workqueue); 329EXPORT_SYMBOL_GPL(flush_workqueue);
@@ -371,8 +371,7 @@ struct workqueue_struct *__create_workqueue(const char *name,
371 } 371 }
372 372
373 wq->name = name; 373 wq->name = name;
374 /* We don't need the distraction of CPUs appearing and vanishing. */ 374 mutex_lock(&workqueue_mutex);
375 lock_cpu_hotplug();
376 if (singlethread) { 375 if (singlethread) {
377 INIT_LIST_HEAD(&wq->list); 376 INIT_LIST_HEAD(&wq->list);
378 p = create_workqueue_thread(wq, singlethread_cpu); 377 p = create_workqueue_thread(wq, singlethread_cpu);
@@ -381,9 +380,7 @@ struct workqueue_struct *__create_workqueue(const char *name,
381 else 380 else
382 wake_up_process(p); 381 wake_up_process(p);
383 } else { 382 } else {
384 spin_lock(&workqueue_lock);
385 list_add(&wq->list, &workqueues); 383 list_add(&wq->list, &workqueues);
386 spin_unlock(&workqueue_lock);
387 for_each_online_cpu(cpu) { 384 for_each_online_cpu(cpu) {
388 p = create_workqueue_thread(wq, cpu); 385 p = create_workqueue_thread(wq, cpu);
389 if (p) { 386 if (p) {
@@ -393,7 +390,7 @@ struct workqueue_struct *__create_workqueue(const char *name,
393 destroy = 1; 390 destroy = 1;
394 } 391 }
395 } 392 }
396 unlock_cpu_hotplug(); 393 mutex_unlock(&workqueue_mutex);
397 394
398 /* 395 /*
399 * Was there any error during startup? If yes then clean up: 396 * Was there any error during startup? If yes then clean up:
@@ -434,17 +431,15 @@ void destroy_workqueue(struct workqueue_struct *wq)
434 flush_workqueue(wq); 431 flush_workqueue(wq);
435 432
436 /* We don't need the distraction of CPUs appearing and vanishing. */ 433 /* We don't need the distraction of CPUs appearing and vanishing. */
437 lock_cpu_hotplug(); 434 mutex_lock(&workqueue_mutex);
438 if (is_single_threaded(wq)) 435 if (is_single_threaded(wq))
439 cleanup_workqueue_thread(wq, singlethread_cpu); 436 cleanup_workqueue_thread(wq, singlethread_cpu);
440 else { 437 else {
441 for_each_online_cpu(cpu) 438 for_each_online_cpu(cpu)
442 cleanup_workqueue_thread(wq, cpu); 439 cleanup_workqueue_thread(wq, cpu);
443 spin_lock(&workqueue_lock);
444 list_del(&wq->list); 440 list_del(&wq->list);
445 spin_unlock(&workqueue_lock);
446 } 441 }
447 unlock_cpu_hotplug(); 442 mutex_unlock(&workqueue_mutex);
448 free_percpu(wq->cpu_wq); 443 free_percpu(wq->cpu_wq);
449 kfree(wq); 444 kfree(wq);
450} 445}
@@ -515,11 +510,13 @@ int schedule_on_each_cpu(void (*func)(void *info), void *info)
515 if (!works) 510 if (!works)
516 return -ENOMEM; 511 return -ENOMEM;
517 512
513 mutex_lock(&workqueue_mutex);
518 for_each_online_cpu(cpu) { 514 for_each_online_cpu(cpu) {
519 INIT_WORK(per_cpu_ptr(works, cpu), func, info); 515 INIT_WORK(per_cpu_ptr(works, cpu), func, info);
520 __queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu), 516 __queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu),
521 per_cpu_ptr(works, cpu)); 517 per_cpu_ptr(works, cpu));
522 } 518 }
519 mutex_unlock(&workqueue_mutex);
523 flush_workqueue(keventd_wq); 520 flush_workqueue(keventd_wq);
524 free_percpu(works); 521 free_percpu(works);
525 return 0; 522 return 0;
@@ -635,6 +632,7 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
635 632
636 switch (action) { 633 switch (action) {
637 case CPU_UP_PREPARE: 634 case CPU_UP_PREPARE:
635 mutex_lock(&workqueue_mutex);
638 /* Create a new workqueue thread for it. */ 636 /* Create a new workqueue thread for it. */
639 list_for_each_entry(wq, &workqueues, list) { 637 list_for_each_entry(wq, &workqueues, list) {
640 if (!create_workqueue_thread(wq, hotcpu)) { 638 if (!create_workqueue_thread(wq, hotcpu)) {
@@ -653,6 +651,7 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
653 kthread_bind(cwq->thread, hotcpu); 651 kthread_bind(cwq->thread, hotcpu);
654 wake_up_process(cwq->thread); 652 wake_up_process(cwq->thread);
655 } 653 }
654 mutex_unlock(&workqueue_mutex);
656 break; 655 break;
657 656
658 case CPU_UP_CANCELED: 657 case CPU_UP_CANCELED:
@@ -664,6 +663,15 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
664 any_online_cpu(cpu_online_map)); 663 any_online_cpu(cpu_online_map));
665 cleanup_workqueue_thread(wq, hotcpu); 664 cleanup_workqueue_thread(wq, hotcpu);
666 } 665 }
666 mutex_unlock(&workqueue_mutex);
667 break;
668
669 case CPU_DOWN_PREPARE:
670 mutex_lock(&workqueue_mutex);
671 break;
672
673 case CPU_DOWN_FAILED:
674 mutex_unlock(&workqueue_mutex);
667 break; 675 break;
668 676
669 case CPU_DEAD: 677 case CPU_DEAD:
@@ -671,6 +679,7 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
671 cleanup_workqueue_thread(wq, hotcpu); 679 cleanup_workqueue_thread(wq, hotcpu);
672 list_for_each_entry(wq, &workqueues, list) 680 list_for_each_entry(wq, &workqueues, list)
673 take_over_work(wq, hotcpu); 681 take_over_work(wq, hotcpu);
682 mutex_unlock(&workqueue_mutex);
674 break; 683 break;
675 } 684 }
676 685
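Note: the workqueue.c hunks above replace the workqueue_lock spinlock and the lock_cpu_hotplug() calls with a single workqueue_mutex that is taken in the *_PREPARE notifier steps and released in the matching completion or failure steps. A minimal sketch of that asymmetric lock/unlock pattern, using a pthread mutex and invented action names rather than the kernel notifier API:

/* Sketch of a notifier that acquires a mutex in the *_PREPARE steps and
 * releases it in the corresponding completion/failure steps, as the
 * workqueue_mutex is now used.  Enum values, callback and main() are
 * illustrative; this is not the kernel notifier API. */
#include <pthread.h>
#include <stdio.h>

enum hotplug_action {
	UP_PREPARE, UP_CANCELED, ONLINE,
	DOWN_PREPARE, DOWN_FAILED, DEAD,
};

static pthread_mutex_t wq_mutex = PTHREAD_MUTEX_INITIALIZER;

static void notifier(enum hotplug_action action)
{
	switch (action) {
	case UP_PREPARE:
	case DOWN_PREPARE:
		/* hold the mutex across the whole transition,
		 * not just one critical section */
		pthread_mutex_lock(&wq_mutex);
		printf("locked for action %d\n", action);
		break;
	case UP_CANCELED:
	case ONLINE:
	case DOWN_FAILED:
	case DEAD:
		printf("unlocking after action %d\n", action);
		pthread_mutex_unlock(&wq_mutex);
		break;
	}
}

int main(void)
{
	/* a successful bring-up, then a failed take-down */
	notifier(UP_PREPARE);
	notifier(ONLINE);
	notifier(DOWN_PREPARE);
	notifier(DOWN_FAILED);
	return 0;
}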
diff --git a/lib/ts_bm.c b/lib/ts_bm.c
index 0110e441480..d90822c378a 100644
--- a/lib/ts_bm.c
+++ b/lib/ts_bm.c
@@ -111,15 +111,14 @@ static int subpattern(u8 *pattern, int i, int j, int g)
111 return ret; 111 return ret;
112} 112}
113 113
114static void compute_prefix_tbl(struct ts_bm *bm, const u8 *pattern, 114static void compute_prefix_tbl(struct ts_bm *bm)
115 unsigned int len)
116{ 115{
117 int i, j, g; 116 int i, j, g;
118 117
119 for (i = 0; i < ASIZE; i++) 118 for (i = 0; i < ASIZE; i++)
120 bm->bad_shift[i] = len; 119 bm->bad_shift[i] = bm->patlen;
121 for (i = 0; i < len - 1; i++) 120 for (i = 0; i < bm->patlen - 1; i++)
122 bm->bad_shift[pattern[i]] = len - 1 - i; 121 bm->bad_shift[bm->pattern[i]] = bm->patlen - 1 - i;
123 122
124 /* Compute the good shift array, used to match reocurrences 123 /* Compute the good shift array, used to match reocurrences
125 * of a subpattern */ 124 * of a subpattern */
@@ -150,8 +149,8 @@ static struct ts_config *bm_init(const void *pattern, unsigned int len,
150 bm = ts_config_priv(conf); 149 bm = ts_config_priv(conf);
151 bm->patlen = len; 150 bm->patlen = len;
152 bm->pattern = (u8 *) bm->good_shift + prefix_tbl_len; 151 bm->pattern = (u8 *) bm->good_shift + prefix_tbl_len;
153 compute_prefix_tbl(bm, pattern, len);
154 memcpy(bm->pattern, pattern, len); 152 memcpy(bm->pattern, pattern, len);
153 compute_prefix_tbl(bm);
155 154
156 return conf; 155 return conf;
157} 156}
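Note: with the ts_bm.c change, compute_prefix_tbl() reads the pattern and its length from struct ts_bm itself, and bm_init() copies the pattern into place before building the tables. A small userspace sketch of the bad-character (bad_shift) table built the same way, assuming ASIZE is 256 and using an invented struct and test pattern:

/* Minimal userspace sketch of the Boyer-Moore bad-character table as
 * compute_prefix_tbl() now derives it from the struct itself; ASIZE,
 * the struct layout and main() are illustrative assumptions, not the
 * kernel's definitions. */
#include <stdio.h>
#include <string.h>

#define ASIZE 256

struct bm_sketch {
	unsigned int patlen;
	unsigned int bad_shift[ASIZE];
	unsigned char pattern[64];
};

static void compute_bad_shift(struct bm_sketch *bm)
{
	unsigned int i;

	/* default shift: full pattern length */
	for (i = 0; i < ASIZE; i++)
		bm->bad_shift[i] = bm->patlen;
	/* characters occurring in the pattern shift less */
	for (i = 0; i < bm->patlen - 1; i++)
		bm->bad_shift[bm->pattern[i]] = bm->patlen - 1 - i;
}

int main(void)
{
	struct bm_sketch bm;

	/* copy the pattern first, then build the table from the copy,
	 * mirroring the new ordering in bm_init() */
	bm.patlen = strlen("needle");
	memcpy(bm.pattern, "needle", bm.patlen);
	compute_bad_shift(&bm);

	printf("shift['n'] = %u, shift['x'] = %u\n",
	       bm.bad_shift['n'], bm.bad_shift['x']);
	return 0;
}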
diff --git a/mm/swap.c b/mm/swap.c
index 8fd095c4ae5..687686a61f7 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -54,6 +54,26 @@ void put_page(struct page *page)
54} 54}
55EXPORT_SYMBOL(put_page); 55EXPORT_SYMBOL(put_page);
56 56
57/**
58 * put_pages_list(): release a list of pages
59 *
60 * Release a list of pages which are strung together on page.lru. Currently
61 * used by read_cache_pages() and related error recovery code.
62 *
63 * @pages: list of pages threaded on page->lru
64 */
65void put_pages_list(struct list_head *pages)
66{
67 while (!list_empty(pages)) {
68 struct page *victim;
69
70 victim = list_entry(pages->prev, struct page, lru);
71 list_del(&victim->lru);
72 page_cache_release(victim);
73 }
74}
75EXPORT_SYMBOL(put_pages_list);
76
57/* 77/*
58 * Writeback is about to end against a page which has been marked for immediate 78 * Writeback is about to end against a page which has been marked for immediate
59 * reclaim. If it still appears to be reclaimable, move it to the tail of the 79 * reclaim. If it still appears to be reclaimable, move it to the tail of the
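Note: put_pages_list() added above simply detaches each entry from the list and releases it until the list is empty. A simplified userspace analogue, assuming a plain singly linked list in place of the kernel's list_head threading:

/* Simplified analogue of put_pages_list(): walk the list, detach each
 * entry and drop it.  The fake_page type and main() are illustrative. */
#include <stdio.h>
#include <stdlib.h>

struct fake_page {
	int index;
	struct fake_page *next;
};

static void put_pages_sketch(struct fake_page **pages)
{
	while (*pages) {
		struct fake_page *victim = *pages;

		*pages = victim->next;	/* list_del() analogue */
		printf("releasing page %d\n", victim->index);
		free(victim);		/* page_cache_release() analogue */
	}
}

int main(void)
{
	struct fake_page *list = NULL;
	int i;

	for (i = 0; i < 3; i++) {
		struct fake_page *p = malloc(sizeof(*p));

		if (!p)
			return 1;
		p->index = i;
		p->next = list;
		list = p;
	}
	put_pages_sketch(&list);
	return 0;
}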
diff --git a/mm/swapfile.c b/mm/swapfile.c
index e70d6c6d6fe..f1f5ec78378 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -442,11 +442,12 @@ int swap_type_of(dev_t device)
442 442
443 if (!(swap_info[i].flags & SWP_WRITEOK)) 443 if (!(swap_info[i].flags & SWP_WRITEOK))
444 continue; 444 continue;
445
445 if (!device) { 446 if (!device) {
446 spin_unlock(&swap_lock); 447 spin_unlock(&swap_lock);
447 return i; 448 return i;
448 } 449 }
449 inode = swap_info->swap_file->f_dentry->d_inode; 450 inode = swap_info[i].swap_file->f_dentry->d_inode;
450 if (S_ISBLK(inode->i_mode) && 451 if (S_ISBLK(inode->i_mode) &&
451 device == MKDEV(imajor(inode), iminor(inode))) { 452 device == MKDEV(imajor(inode), iminor(inode))) {
452 spin_unlock(&swap_lock); 453 spin_unlock(&swap_lock);
diff --git a/net/atm/proc.c b/net/atm/proc.c
index 3f95b0886a6..91fe5f53ff1 100644
--- a/net/atm/proc.c
+++ b/net/atm/proc.c
@@ -507,7 +507,7 @@ err_out:
507 goto out; 507 goto out;
508} 508}
509 509
510void __exit atm_proc_exit(void) 510void atm_proc_exit(void)
511{ 511{
512 atm_proc_dirs_remove(); 512 atm_proc_dirs_remove();
513} 513}
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
index 6ccd32b3080..864fbbc7b24 100644
--- a/net/bridge/br_forward.c
+++ b/net/bridge/br_forward.c
@@ -40,11 +40,15 @@ int br_dev_queue_push_xmit(struct sk_buff *skb)
40 else { 40 else {
41#ifdef CONFIG_BRIDGE_NETFILTER 41#ifdef CONFIG_BRIDGE_NETFILTER
42 /* ip_refrag calls ip_fragment, doesn't copy the MAC header. */ 42 /* ip_refrag calls ip_fragment, doesn't copy the MAC header. */
43 nf_bridge_maybe_copy_header(skb); 43 if (nf_bridge_maybe_copy_header(skb))
44 kfree_skb(skb);
45 else
44#endif 46#endif
45 skb_push(skb, ETH_HLEN); 47 {
48 skb_push(skb, ETH_HLEN);
46 49
47 dev_queue_xmit(skb); 50 dev_queue_xmit(skb);
51 }
48 } 52 }
49 53
50 return 0; 54 return 0;
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index f55ef682ef8..b1211d5342f 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -386,12 +386,17 @@ void br_features_recompute(struct net_bridge *br)
386 checksum = 0; 386 checksum = 0;
387 387
388 if (feature & NETIF_F_GSO) 388 if (feature & NETIF_F_GSO)
389 feature |= NETIF_F_TSO; 389 feature |= NETIF_F_GSO_SOFTWARE;
390 feature |= NETIF_F_GSO; 390 feature |= NETIF_F_GSO;
391 391
392 features &= feature; 392 features &= feature;
393 } 393 }
394 394
395 if (!(checksum & NETIF_F_ALL_CSUM))
396 features &= ~NETIF_F_SG;
397 if (!(features & NETIF_F_SG))
398 features &= ~NETIF_F_GSO_MASK;
399
395 br->dev->features = features | checksum | NETIF_F_LLTX | 400 br->dev->features = features | checksum | NETIF_F_LLTX |
396 NETIF_F_GSO_ROBUST; 401 NETIF_F_GSO_ROBUST;
397} 402}
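Note: the br_if.c hunk adds two dependency checks when recomputing bridge features: without checksum offload, scatter/gather is cleared, and without scatter/gather, the GSO bits are cleared. A tiny sketch of that clamping with made-up flag values (not the real NETIF_F_* masks):

/* Sketch of the new dependency clamping in br_features_recompute().
 * The flag values below are illustrative stand-ins. */
#include <stdio.h>

#define F_SG		0x01
#define F_CSUM		0x02
#define F_GSO_MASK	0x0c	/* pretend two GSO feature bits */

static unsigned int clamp_features(unsigned int features, unsigned int checksum)
{
	if (!(checksum & F_CSUM))
		features &= ~F_SG;
	if (!(features & F_SG))
		features &= ~F_GSO_MASK;
	return features;
}

int main(void)
{
	/* GSO and SG requested, but no checksum offload available */
	unsigned int f = clamp_features(F_SG | F_GSO_MASK, 0);

	printf("resulting features: %#x\n", f);	/* prints 0 */
	return 0;
}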
diff --git a/net/bridge/netfilter/ebt_ulog.c b/net/bridge/netfilter/ebt_ulog.c
index 02693a230dc..9f950db3b76 100644
--- a/net/bridge/netfilter/ebt_ulog.c
+++ b/net/bridge/netfilter/ebt_ulog.c
@@ -74,6 +74,9 @@ static void ulog_send(unsigned int nlgroup)
74 if (timer_pending(&ub->timer)) 74 if (timer_pending(&ub->timer))
75 del_timer(&ub->timer); 75 del_timer(&ub->timer);
76 76
77 if (!ub->skb)
78 return;
79
77 /* last nlmsg needs NLMSG_DONE */ 80 /* last nlmsg needs NLMSG_DONE */
78 if (ub->qlen > 1) 81 if (ub->qlen > 1)
79 ub->lastnlh->nlmsg_type = NLMSG_DONE; 82 ub->lastnlh->nlmsg_type = NLMSG_DONE;
diff --git a/net/core/dev.c b/net/core/dev.c
index d95e2626d94..d4a1ec3bded 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -116,6 +116,7 @@
116#include <linux/audit.h> 116#include <linux/audit.h>
117#include <linux/dmaengine.h> 117#include <linux/dmaengine.h>
118#include <linux/err.h> 118#include <linux/err.h>
119#include <linux/ctype.h>
119 120
120/* 121/*
121 * The list of packet types we will receive (as opposed to discard) 122 * The list of packet types we will receive (as opposed to discard)
@@ -632,14 +633,22 @@ struct net_device * dev_get_by_flags(unsigned short if_flags, unsigned short mas
632 * @name: name string 633 * @name: name string
633 * 634 *
634 * Network device names need to be valid file names to 635 * Network device names need to be valid file names to
635 * to allow sysfs to work 636 * to allow sysfs to work. We also disallow any kind of
637 * whitespace.
636 */ 638 */
637int dev_valid_name(const char *name) 639int dev_valid_name(const char *name)
638{ 640{
639 return !(*name == '\0' 641 if (*name == '\0')
640 || !strcmp(name, ".") 642 return 0;
641 || !strcmp(name, "..") 643 if (!strcmp(name, ".") || !strcmp(name, ".."))
642 || strchr(name, '/')); 644 return 0;
645
646 while (*name) {
647 if (*name == '/' || isspace(*name))
648 return 0;
649 name++;
650 }
651 return 1;
643} 652}
644 653
645/** 654/**
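Note: the rewritten dev_valid_name() above keeps the old checks (empty name, ".", "..", '/') and additionally rejects any whitespace. A userspace sketch of the same validation, with an invented test harness:

/* Userspace sketch of the stricter name check: empty names, "." and
 * "..", '/' and any whitespace are all rejected.  main() and the test
 * names are illustrative only. */
#include <ctype.h>
#include <stdio.h>
#include <string.h>

static int name_is_valid(const char *name)
{
	if (*name == '\0')
		return 0;
	if (!strcmp(name, ".") || !strcmp(name, ".."))
		return 0;

	while (*name) {
		if (*name == '/' || isspace((unsigned char)*name))
			return 0;
		name++;
	}
	return 1;
}

int main(void)
{
	const char *names[] = { "eth0", "bad name", "a/b", ".", "" };
	size_t i;

	for (i = 0; i < sizeof(names) / sizeof(names[0]); i++)
		printf("%-10s -> %s\n", names[i],
		       name_is_valid(names[i]) ? "valid" : "invalid");
	return 0;
}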
@@ -1619,26 +1628,10 @@ static inline struct net_device *skb_bond(struct sk_buff *skb)
1619 struct net_device *dev = skb->dev; 1628 struct net_device *dev = skb->dev;
1620 1629
1621 if (dev->master) { 1630 if (dev->master) {
1622 /* 1631 if (skb_bond_should_drop(skb)) {
1623 * On bonding slaves other than the currently active
1624 * slave, suppress duplicates except for 802.3ad
1625 * ETH_P_SLOW and alb non-mcast/bcast.
1626 */
1627 if (dev->priv_flags & IFF_SLAVE_INACTIVE) {
1628 if (dev->master->priv_flags & IFF_MASTER_ALB) {
1629 if (skb->pkt_type != PACKET_BROADCAST &&
1630 skb->pkt_type != PACKET_MULTICAST)
1631 goto keep;
1632 }
1633
1634 if (dev->master->priv_flags & IFF_MASTER_8023AD &&
1635 skb->protocol == __constant_htons(ETH_P_SLOW))
1636 goto keep;
1637
1638 kfree_skb(skb); 1632 kfree_skb(skb);
1639 return NULL; 1633 return NULL;
1640 } 1634 }
1641keep:
1642 skb->dev = dev->master; 1635 skb->dev = dev->master;
1643 } 1636 }
1644 1637
diff --git a/net/core/dst.c b/net/core/dst.c
index 470c05bc4cb..1a5e49da0e7 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -95,12 +95,11 @@ static void dst_run_gc(unsigned long dummy)
95 dst_gc_timer_inc = DST_GC_INC; 95 dst_gc_timer_inc = DST_GC_INC;
96 dst_gc_timer_expires = DST_GC_MIN; 96 dst_gc_timer_expires = DST_GC_MIN;
97 } 97 }
98 dst_gc_timer.expires = jiffies + dst_gc_timer_expires;
99#if RT_CACHE_DEBUG >= 2 98#if RT_CACHE_DEBUG >= 2
100 printk("dst_total: %d/%d %ld\n", 99 printk("dst_total: %d/%d %ld\n",
101 atomic_read(&dst_total), delayed, dst_gc_timer_expires); 100 atomic_read(&dst_total), delayed, dst_gc_timer_expires);
102#endif 101#endif
103 add_timer(&dst_gc_timer); 102 mod_timer(&dst_gc_timer, jiffies + dst_gc_timer_expires);
104 103
105out: 104out:
106 spin_unlock(&dst_lock); 105 spin_unlock(&dst_lock);
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 67ed14ddabd..6a7320b39ed 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -2149,6 +2149,8 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
2149 skb->mac.raw = ((u8 *) iph) - 14 - pkt_dev->nr_labels*sizeof(u32); 2149 skb->mac.raw = ((u8 *) iph) - 14 - pkt_dev->nr_labels*sizeof(u32);
2150 skb->dev = odev; 2150 skb->dev = odev;
2151 skb->pkt_type = PACKET_HOST; 2151 skb->pkt_type = PACKET_HOST;
2152 skb->nh.iph = iph;
2153 skb->h.uh = udph;
2152 2154
2153 if (pkt_dev->nfrags <= 0) 2155 if (pkt_dev->nfrags <= 0)
2154 pgh = (struct pktgen_hdr *)skb_put(skb, datalen); 2156 pgh = (struct pktgen_hdr *)skb_put(skb, datalen);
@@ -2460,6 +2462,8 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
2460 skb->protocol = protocol; 2462 skb->protocol = protocol;
2461 skb->dev = odev; 2463 skb->dev = odev;
2462 skb->pkt_type = PACKET_HOST; 2464 skb->pkt_type = PACKET_HOST;
2465 skb->nh.ipv6h = iph;
2466 skb->h.uh = udph;
2463 2467
2464 if (pkt_dev->nfrags <= 0) 2468 if (pkt_dev->nfrags <= 0)
2465 pgh = (struct pktgen_hdr *)skb_put(skb, datalen); 2469 pgh = (struct pktgen_hdr *)skb_put(skb, datalen);
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 20e5bb73f14..30cc1ba6ed5 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -394,6 +394,9 @@ static int do_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
394 } 394 }
395 395
396 if (ida[IFLA_ADDRESS - 1]) { 396 if (ida[IFLA_ADDRESS - 1]) {
397 struct sockaddr *sa;
398 int len;
399
397 if (!dev->set_mac_address) { 400 if (!dev->set_mac_address) {
398 err = -EOPNOTSUPP; 401 err = -EOPNOTSUPP;
399 goto out; 402 goto out;
@@ -405,7 +408,17 @@ static int do_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
405 if (ida[IFLA_ADDRESS - 1]->rta_len != RTA_LENGTH(dev->addr_len)) 408 if (ida[IFLA_ADDRESS - 1]->rta_len != RTA_LENGTH(dev->addr_len))
406 goto out; 409 goto out;
407 410
408 err = dev->set_mac_address(dev, RTA_DATA(ida[IFLA_ADDRESS - 1])); 411 len = sizeof(sa_family_t) + dev->addr_len;
412 sa = kmalloc(len, GFP_KERNEL);
413 if (!sa) {
414 err = -ENOMEM;
415 goto out;
416 }
417 sa->sa_family = dev->type;
418 memcpy(sa->sa_data, RTA_DATA(ida[IFLA_ADDRESS - 1]),
419 dev->addr_len);
420 err = dev->set_mac_address(dev, sa);
421 kfree(sa);
409 if (err) 422 if (err)
410 goto out; 423 goto out;
411 send_addr_notify = 1; 424 send_addr_notify = 1;
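Note: the rtnetlink.c fix builds a struct sockaddr (family plus dev->addr_len bytes of hardware address) and hands that to dev->set_mac_address() instead of the raw attribute data. A userspace sketch of filling a sockaddr the same way; the family constant and MAC bytes are illustrative, and the kernel sizes the allocation to the device's address length rather than using the fixed struct:

/* Userspace sketch of building a sockaddr carrying a 6-byte hardware
 * address, as do_setlink() now does before calling set_mac_address().
 * The family value and MAC bytes are made up for the example. */
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>

int main(void)
{
	const unsigned char mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	struct sockaddr sa;
	int i;

	memset(&sa, 0, sizeof(sa));
	sa.sa_family = 1;	/* e.g. an ARPHRD_ETHER-style device type */
	memcpy(sa.sa_data, mac, sizeof(mac));

	printf("family %d, addr", sa.sa_family);
	for (i = 0; i < 6; i++)
		printf(" %02x", (unsigned char)sa.sa_data[i]);
	printf("\n");
	return 0;
}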
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 022d8894c11..c54f3664bce 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -268,8 +268,10 @@ struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
268 struct sk_buff *skb; 268 struct sk_buff *skb;
269 269
270 skb = alloc_skb(length + NET_SKB_PAD, gfp_mask); 270 skb = alloc_skb(length + NET_SKB_PAD, gfp_mask);
271 if (likely(skb)) 271 if (likely(skb)) {
272 skb_reserve(skb, NET_SKB_PAD); 272 skb_reserve(skb, NET_SKB_PAD);
273 skb->dev = dev;
274 }
273 return skb; 275 return skb;
274} 276}
275 277
diff --git a/net/core/utils.c b/net/core/utils.c
index 4f96f389243..e31c90e0559 100644
--- a/net/core/utils.c
+++ b/net/core/utils.c
@@ -130,12 +130,13 @@ void __init net_random_init(void)
130static int net_random_reseed(void) 130static int net_random_reseed(void)
131{ 131{
132 int i; 132 int i;
133 unsigned long seed[NR_CPUS]; 133 unsigned long seed;
134 134
135 get_random_bytes(seed, sizeof(seed));
136 for_each_possible_cpu(i) { 135 for_each_possible_cpu(i) {
137 struct nrnd_state *state = &per_cpu(net_rand_state,i); 136 struct nrnd_state *state = &per_cpu(net_rand_state,i);
138 __net_srandom(state, seed[i]); 137
138 get_random_bytes(&seed, sizeof(seed));
139 __net_srandom(state, seed);
139 } 140 }
140 return 0; 141 return 0;
141} 142}
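Note: the net_random_reseed() change drops the NR_CPUS-sized seed array on the stack and instead draws a fresh seed inside the loop for each per-CPU state. A userspace analogue that reads one word of entropy per state from /dev/urandom (the state struct and count are invented):

/* Analogue of the reseed loop: one fresh seed is read per state inside
 * the loop instead of filling a large array up front.  NSTATES and the
 * state struct are illustrative assumptions. */
#include <stdio.h>
#include <stdlib.h>

#define NSTATES 4

struct rnd_state {
	unsigned long s;
};

static struct rnd_state states[NSTATES];

int main(void)
{
	FILE *f = fopen("/dev/urandom", "rb");
	int i;

	if (!f) {
		perror("/dev/urandom");
		return 1;
	}
	for (i = 0; i < NSTATES; i++) {
		unsigned long seed;

		/* fresh entropy per state, mirroring the per-iteration
		 * get_random_bytes() call in the patched loop */
		if (fread(&seed, sizeof(seed), 1, f) != 1) {
			fclose(f);
			return 1;
		}
		states[i].s = seed;
		printf("state %d seeded with %#lx\n", i, states[i].s);
	}
	fclose(f);
	return 0;
}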
diff --git a/net/core/wireless.c b/net/core/wireless.c
index d2bc72d318f..de0bde4b51d 100644
--- a/net/core/wireless.c
+++ b/net/core/wireless.c
@@ -82,6 +82,7 @@
82#include <linux/init.h> /* for __init */ 82#include <linux/init.h> /* for __init */
83#include <linux/if_arp.h> /* ARPHRD_ETHER */ 83#include <linux/if_arp.h> /* ARPHRD_ETHER */
84#include <linux/etherdevice.h> /* compare_ether_addr */ 84#include <linux/etherdevice.h> /* compare_ether_addr */
85#include <linux/interrupt.h>
85 86
86#include <linux/wireless.h> /* Pretty obvious */ 87#include <linux/wireless.h> /* Pretty obvious */
87#include <net/iw_handler.h> /* New driver API */ 88#include <net/iw_handler.h> /* New driver API */
@@ -1842,6 +1843,18 @@ int wireless_rtnetlink_set(struct net_device * dev,
1842 */ 1843 */
1843 1844
1844#ifdef WE_EVENT_RTNETLINK 1845#ifdef WE_EVENT_RTNETLINK
1846static struct sk_buff_head wireless_nlevent_queue;
1847
1848static void wireless_nlevent_process(unsigned long data)
1849{
1850 struct sk_buff *skb;
1851
1852 while ((skb = skb_dequeue(&wireless_nlevent_queue)))
1853 netlink_broadcast(rtnl, skb, 0, RTNLGRP_LINK, GFP_ATOMIC);
1854}
1855
1856static DECLARE_TASKLET(wireless_nlevent_tasklet, wireless_nlevent_process, 0);
1857
1845/* ---------------------------------------------------------------- */ 1858/* ---------------------------------------------------------------- */
1846/* 1859/*
1847 * Fill a rtnetlink message with our event data. 1860 * Fill a rtnetlink message with our event data.
@@ -1904,8 +1917,17 @@ static inline void rtmsg_iwinfo(struct net_device * dev,
1904 return; 1917 return;
1905 } 1918 }
1906 NETLINK_CB(skb).dst_group = RTNLGRP_LINK; 1919 NETLINK_CB(skb).dst_group = RTNLGRP_LINK;
1907 netlink_broadcast(rtnl, skb, 0, RTNLGRP_LINK, GFP_ATOMIC); 1920 skb_queue_tail(&wireless_nlevent_queue, skb);
1921 tasklet_schedule(&wireless_nlevent_tasklet);
1922}
1923
1924static int __init wireless_nlevent_init(void)
1925{
1926 skb_queue_head_init(&wireless_nlevent_queue);
1927 return 0;
1908} 1928}
1929
1930subsys_initcall(wireless_nlevent_init);
1909#endif /* WE_EVENT_RTNETLINK */ 1931#endif /* WE_EVENT_RTNETLINK */
1910 1932
1911/* ---------------------------------------------------------------- */ 1933/* ---------------------------------------------------------------- */
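Note: the wireless.c change stops calling netlink_broadcast() directly from rtmsg_iwinfo() and instead queues the event skb for a tasklet to deliver later. A userspace analogue of that enqueue-then-drain split, using a mutex-protected list in place of the skb queue and tasklet (all names invented):

/* Analogue of the queue-and-defer pattern: the producer only enqueues,
 * and a separate drain step delivers everything later.  No netlink or
 * tasklet API is used here. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct event {
	int id;
	struct event *next;
};

static struct event *head, *tail;
static pthread_mutex_t qlock = PTHREAD_MUTEX_INITIALIZER;

/* called from the "hot" path: just queue, never deliver directly */
static void queue_event(int id)
{
	struct event *e = malloc(sizeof(*e));

	if (!e)
		return;
	e->id = id;
	e->next = NULL;
	pthread_mutex_lock(&qlock);
	if (tail)
		tail->next = e;
	else
		head = e;
	tail = e;
	pthread_mutex_unlock(&qlock);
}

/* deferred work: drain the queue and deliver outside the hot path */
static void drain_events(void)
{
	for (;;) {
		struct event *e;

		pthread_mutex_lock(&qlock);
		e = head;
		if (e) {
			head = e->next;
			if (!head)
				tail = NULL;
		}
		pthread_mutex_unlock(&qlock);
		if (!e)
			break;
		printf("delivering event %d\n", e->id);
		free(e);
	}
}

int main(void)
{
	queue_event(1);
	queue_event(2);
	drain_events();
	return 0;
}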
diff --git a/net/dccp/ccids/ccid3.c b/net/dccp/ccids/ccid3.c
index c39bff706cf..090bc39e819 100644
--- a/net/dccp/ccids/ccid3.c
+++ b/net/dccp/ccids/ccid3.c
@@ -2,7 +2,7 @@
2 * net/dccp/ccids/ccid3.c 2 * net/dccp/ccids/ccid3.c
3 * 3 *
4 * Copyright (c) 2005 The University of Waikato, Hamilton, New Zealand. 4 * Copyright (c) 2005 The University of Waikato, Hamilton, New Zealand.
5 * Copyright (c) 2005-6 Ian McDonald <imcdnzl@gmail.com> 5 * Copyright (c) 2005-6 Ian McDonald <ian.mcdonald@jandi.co.nz>
6 * 6 *
7 * An implementation of the DCCP protocol 7 * An implementation of the DCCP protocol
8 * 8 *
@@ -342,6 +342,8 @@ static int ccid3_hc_tx_send_packet(struct sock *sk,
342 new_packet->dccphtx_ccval = 342 new_packet->dccphtx_ccval =
343 DCCP_SKB_CB(skb)->dccpd_ccval = 343 DCCP_SKB_CB(skb)->dccpd_ccval =
344 hctx->ccid3hctx_last_win_count; 344 hctx->ccid3hctx_last_win_count;
345 timeval_add_usecs(&hctx->ccid3hctx_t_nom,
346 hctx->ccid3hctx_t_ipi);
345 } 347 }
346out: 348out:
347 return rc; 349 return rc;
@@ -413,7 +415,8 @@ static void ccid3_hc_tx_packet_sent(struct sock *sk, int more, int len)
413 case TFRC_SSTATE_NO_FBACK: 415 case TFRC_SSTATE_NO_FBACK:
414 case TFRC_SSTATE_FBACK: 416 case TFRC_SSTATE_FBACK:
415 if (len > 0) { 417 if (len > 0) {
416 hctx->ccid3hctx_t_nom = now; 418 timeval_sub_usecs(&hctx->ccid3hctx_t_nom,
419 hctx->ccid3hctx_t_ipi);
417 ccid3_calc_new_t_ipi(hctx); 420 ccid3_calc_new_t_ipi(hctx);
418 ccid3_calc_new_delta(hctx); 421 ccid3_calc_new_delta(hctx);
419 timeval_add_usecs(&hctx->ccid3hctx_t_nom, 422 timeval_add_usecs(&hctx->ccid3hctx_t_nom,
@@ -757,8 +760,7 @@ static void ccid3_hc_rx_send_feedback(struct sock *sk)
757 } 760 }
758 761
759 hcrx->ccid3hcrx_tstamp_last_feedback = now; 762 hcrx->ccid3hcrx_tstamp_last_feedback = now;
760 hcrx->ccid3hcrx_last_counter = packet->dccphrx_ccval; 763 hcrx->ccid3hcrx_ccval_last_counter = packet->dccphrx_ccval;
761 hcrx->ccid3hcrx_seqno_last_counter = packet->dccphrx_seqno;
762 hcrx->ccid3hcrx_bytes_recv = 0; 764 hcrx->ccid3hcrx_bytes_recv = 0;
763 765
764 /* Convert to multiples of 10us */ 766 /* Convert to multiples of 10us */
@@ -782,7 +784,7 @@ static int ccid3_hc_rx_insert_options(struct sock *sk, struct sk_buff *skb)
782 if (!(sk->sk_state == DCCP_OPEN || sk->sk_state == DCCP_PARTOPEN)) 784 if (!(sk->sk_state == DCCP_OPEN || sk->sk_state == DCCP_PARTOPEN))
783 return 0; 785 return 0;
784 786
785 DCCP_SKB_CB(skb)->dccpd_ccval = hcrx->ccid3hcrx_last_counter; 787 DCCP_SKB_CB(skb)->dccpd_ccval = hcrx->ccid3hcrx_ccval_last_counter;
786 788
787 if (dccp_packet_without_ack(skb)) 789 if (dccp_packet_without_ack(skb))
788 return 0; 790 return 0;
@@ -854,6 +856,11 @@ static u32 ccid3_hc_rx_calc_first_li(struct sock *sk)
854 interval = 1; 856 interval = 1;
855 } 857 }
856found: 858found:
859 if (!tail) {
860 LIMIT_NETDEBUG(KERN_WARNING "%s: tail is null\n",
861 __FUNCTION__);
862 return ~0;
863 }
857 rtt = timeval_delta(&tstamp, &tail->dccphrx_tstamp) * 4 / interval; 864 rtt = timeval_delta(&tstamp, &tail->dccphrx_tstamp) * 4 / interval;
858 ccid3_pr_debug("%s, sk=%p, approximated RTT to %uus\n", 865 ccid3_pr_debug("%s, sk=%p, approximated RTT to %uus\n",
859 dccp_role(sk), sk, rtt); 866 dccp_role(sk), sk, rtt);
@@ -864,9 +871,20 @@ found:
864 delta = timeval_delta(&tstamp, &hcrx->ccid3hcrx_tstamp_last_feedback); 871 delta = timeval_delta(&tstamp, &hcrx->ccid3hcrx_tstamp_last_feedback);
865 x_recv = usecs_div(hcrx->ccid3hcrx_bytes_recv, delta); 872 x_recv = usecs_div(hcrx->ccid3hcrx_bytes_recv, delta);
866 873
874 if (x_recv == 0)
875 x_recv = hcrx->ccid3hcrx_x_recv;
876
867 tmp1 = (u64)x_recv * (u64)rtt; 877 tmp1 = (u64)x_recv * (u64)rtt;
868 do_div(tmp1,10000000); 878 do_div(tmp1,10000000);
869 tmp2 = (u32)tmp1; 879 tmp2 = (u32)tmp1;
880
881 if (!tmp2) {
882 LIMIT_NETDEBUG(KERN_WARNING "tmp2 = 0 "
883 "%s: x_recv = %u, rtt =%u\n",
884 __FUNCTION__, x_recv, rtt);
885 return ~0;
886 }
887
870 fval = (hcrx->ccid3hcrx_s * 100000) / tmp2; 888 fval = (hcrx->ccid3hcrx_s * 100000) / tmp2;
871 /* do not alter order above or you will get overflow on 32 bit */ 889 /* do not alter order above or you will get overflow on 32 bit */
872 p = tfrc_calc_x_reverse_lookup(fval); 890 p = tfrc_calc_x_reverse_lookup(fval);
@@ -882,31 +900,101 @@ found:
882static void ccid3_hc_rx_update_li(struct sock *sk, u64 seq_loss, u8 win_loss) 900static void ccid3_hc_rx_update_li(struct sock *sk, u64 seq_loss, u8 win_loss)
883{ 901{
884 struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk); 902 struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk);
903 struct dccp_li_hist_entry *next, *head;
904 u64 seq_temp;
885 905
886 if (seq_loss != DCCP_MAX_SEQNO + 1 && 906 if (list_empty(&hcrx->ccid3hcrx_li_hist)) {
887 list_empty(&hcrx->ccid3hcrx_li_hist)) { 907 if (!dccp_li_hist_interval_new(ccid3_li_hist,
888 struct dccp_li_hist_entry *li_tail; 908 &hcrx->ccid3hcrx_li_hist, seq_loss, win_loss))
909 return;
889 910
890 li_tail = dccp_li_hist_interval_new(ccid3_li_hist, 911 next = (struct dccp_li_hist_entry *)
891 &hcrx->ccid3hcrx_li_hist, 912 hcrx->ccid3hcrx_li_hist.next;
892 seq_loss, win_loss); 913 next->dccplih_interval = ccid3_hc_rx_calc_first_li(sk);
893 if (li_tail == NULL) 914 } else {
915 struct dccp_li_hist_entry *entry;
916 struct list_head *tail;
917
918 head = (struct dccp_li_hist_entry *)
919 hcrx->ccid3hcrx_li_hist.next;
920 /* FIXME win count check removed as was wrong */
921 /* should make this check with receive history */
922 /* and compare there as per section 10.2 of RFC4342 */
923
924 /* new loss event detected */
925 /* calculate last interval length */
926 seq_temp = dccp_delta_seqno(head->dccplih_seqno, seq_loss);
927 entry = dccp_li_hist_entry_new(ccid3_li_hist, SLAB_ATOMIC);
928
929 if (entry == NULL) {
930 printk(KERN_CRIT "%s: out of memory\n",__FUNCTION__);
931 dump_stack();
894 return; 932 return;
895 li_tail->dccplih_interval = ccid3_hc_rx_calc_first_li(sk); 933 }
896 } else 934
897 LIMIT_NETDEBUG(KERN_WARNING "%s: FIXME: find end of " 935 list_add(&entry->dccplih_node, &hcrx->ccid3hcrx_li_hist);
898 "interval\n", __FUNCTION__); 936
937 tail = hcrx->ccid3hcrx_li_hist.prev;
938 list_del(tail);
939 kmem_cache_free(ccid3_li_hist->dccplih_slab, tail);
940
941 /* Create the newest interval */
942 entry->dccplih_seqno = seq_loss;
943 entry->dccplih_interval = seq_temp;
944 entry->dccplih_win_count = win_loss;
945 }
899} 946}
900 947
901static void ccid3_hc_rx_detect_loss(struct sock *sk) 948static int ccid3_hc_rx_detect_loss(struct sock *sk,
949 struct dccp_rx_hist_entry *packet)
902{ 950{
903 struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk); 951 struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk);
904 u8 win_loss; 952 struct dccp_rx_hist_entry *rx_hist = dccp_rx_hist_head(&hcrx->ccid3hcrx_hist);
905 const u64 seq_loss = dccp_rx_hist_detect_loss(&hcrx->ccid3hcrx_hist, 953 u64 seqno = packet->dccphrx_seqno;
906 &hcrx->ccid3hcrx_li_hist, 954 u64 tmp_seqno;
907 &win_loss); 955 int loss = 0;
956 u8 ccval;
957
958
959 tmp_seqno = hcrx->ccid3hcrx_seqno_nonloss;
960
961 if (!rx_hist ||
962 follows48(packet->dccphrx_seqno, hcrx->ccid3hcrx_seqno_nonloss)) {
963 hcrx->ccid3hcrx_seqno_nonloss = seqno;
964 hcrx->ccid3hcrx_ccval_nonloss = packet->dccphrx_ccval;
965 goto detect_out;
966 }
967
908 968
909 ccid3_hc_rx_update_li(sk, seq_loss, win_loss); 969 while (dccp_delta_seqno(hcrx->ccid3hcrx_seqno_nonloss, seqno)
970 > TFRC_RECV_NUM_LATE_LOSS) {
971 loss = 1;
972 ccid3_hc_rx_update_li(sk, hcrx->ccid3hcrx_seqno_nonloss,
973 hcrx->ccid3hcrx_ccval_nonloss);
974 tmp_seqno = hcrx->ccid3hcrx_seqno_nonloss;
975 dccp_inc_seqno(&tmp_seqno);
976 hcrx->ccid3hcrx_seqno_nonloss = tmp_seqno;
977 dccp_inc_seqno(&tmp_seqno);
978 while (dccp_rx_hist_find_entry(&hcrx->ccid3hcrx_hist,
979 tmp_seqno, &ccval)) {
980 hcrx->ccid3hcrx_seqno_nonloss = tmp_seqno;
981 hcrx->ccid3hcrx_ccval_nonloss = ccval;
982 dccp_inc_seqno(&tmp_seqno);
983 }
984 }
985
986 /* FIXME - this code could be simplified with above while */
987 /* but works at moment */
988 if (follows48(packet->dccphrx_seqno, hcrx->ccid3hcrx_seqno_nonloss)) {
989 hcrx->ccid3hcrx_seqno_nonloss = seqno;
990 hcrx->ccid3hcrx_ccval_nonloss = packet->dccphrx_ccval;
991 }
992
993detect_out:
994 dccp_rx_hist_add_packet(ccid3_rx_hist, &hcrx->ccid3hcrx_hist,
995 &hcrx->ccid3hcrx_li_hist, packet,
996 hcrx->ccid3hcrx_seqno_nonloss);
997 return loss;
910} 998}
911 999
912static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb) 1000static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
@@ -916,8 +1004,8 @@ static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
916 struct dccp_rx_hist_entry *packet; 1004 struct dccp_rx_hist_entry *packet;
917 struct timeval now; 1005 struct timeval now;
918 u8 win_count; 1006 u8 win_count;
919 u32 p_prev, r_sample, t_elapsed; 1007 u32 p_prev, rtt_prev, r_sample, t_elapsed;
920 int ins; 1008 int loss;
921 1009
922 BUG_ON(hcrx == NULL || 1010 BUG_ON(hcrx == NULL ||
923 !(hcrx->ccid3hcrx_state == TFRC_RSTATE_NO_DATA || 1011 !(hcrx->ccid3hcrx_state == TFRC_RSTATE_NO_DATA ||
@@ -932,7 +1020,7 @@ static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
932 case DCCP_PKT_DATAACK: 1020 case DCCP_PKT_DATAACK:
933 if (opt_recv->dccpor_timestamp_echo == 0) 1021 if (opt_recv->dccpor_timestamp_echo == 0)
934 break; 1022 break;
935 p_prev = hcrx->ccid3hcrx_rtt; 1023 rtt_prev = hcrx->ccid3hcrx_rtt;
936 dccp_timestamp(sk, &now); 1024 dccp_timestamp(sk, &now);
937 timeval_sub_usecs(&now, opt_recv->dccpor_timestamp_echo * 10); 1025 timeval_sub_usecs(&now, opt_recv->dccpor_timestamp_echo * 10);
938 r_sample = timeval_usecs(&now); 1026 r_sample = timeval_usecs(&now);
@@ -951,8 +1039,8 @@ static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
951 hcrx->ccid3hcrx_rtt = (hcrx->ccid3hcrx_rtt * 9) / 10 + 1039 hcrx->ccid3hcrx_rtt = (hcrx->ccid3hcrx_rtt * 9) / 10 +
952 r_sample / 10; 1040 r_sample / 10;
953 1041
954 if (p_prev != hcrx->ccid3hcrx_rtt) 1042 if (rtt_prev != hcrx->ccid3hcrx_rtt)
955 ccid3_pr_debug("%s, New RTT=%luus, elapsed time=%u\n", 1043 ccid3_pr_debug("%s, New RTT=%uus, elapsed time=%u\n",
956 dccp_role(sk), hcrx->ccid3hcrx_rtt, 1044 dccp_role(sk), hcrx->ccid3hcrx_rtt,
957 opt_recv->dccpor_elapsed_time); 1045 opt_recv->dccpor_elapsed_time);
958 break; 1046 break;
@@ -973,8 +1061,7 @@ static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
973 1061
974 win_count = packet->dccphrx_ccval; 1062 win_count = packet->dccphrx_ccval;
975 1063
976 ins = dccp_rx_hist_add_packet(ccid3_rx_hist, &hcrx->ccid3hcrx_hist, 1064 loss = ccid3_hc_rx_detect_loss(sk, packet);
977 &hcrx->ccid3hcrx_li_hist, packet);
978 1065
979 if (DCCP_SKB_CB(skb)->dccpd_type == DCCP_PKT_ACK) 1066 if (DCCP_SKB_CB(skb)->dccpd_type == DCCP_PKT_ACK)
980 return; 1067 return;
@@ -991,7 +1078,7 @@ static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
991 case TFRC_RSTATE_DATA: 1078 case TFRC_RSTATE_DATA:
992 hcrx->ccid3hcrx_bytes_recv += skb->len - 1079 hcrx->ccid3hcrx_bytes_recv += skb->len -
993 dccp_hdr(skb)->dccph_doff * 4; 1080 dccp_hdr(skb)->dccph_doff * 4;
994 if (ins != 0) 1081 if (loss)
995 break; 1082 break;
996 1083
997 dccp_timestamp(sk, &now); 1084 dccp_timestamp(sk, &now);
@@ -1012,7 +1099,6 @@ static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
1012 ccid3_pr_debug("%s, sk=%p(%s), data loss! Reacting...\n", 1099 ccid3_pr_debug("%s, sk=%p(%s), data loss! Reacting...\n",
1013 dccp_role(sk), sk, dccp_state_name(sk->sk_state)); 1100 dccp_role(sk), sk, dccp_state_name(sk->sk_state));
1014 1101
1015 ccid3_hc_rx_detect_loss(sk);
1016 p_prev = hcrx->ccid3hcrx_p; 1102 p_prev = hcrx->ccid3hcrx_p;
1017 1103
1018 /* Calculate loss event rate */ 1104 /* Calculate loss event rate */
@@ -1022,6 +1108,9 @@ static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
1022 /* Scaling up by 1000000 as fixed decimal */ 1108 /* Scaling up by 1000000 as fixed decimal */
1023 if (i_mean != 0) 1109 if (i_mean != 0)
1024 hcrx->ccid3hcrx_p = 1000000 / i_mean; 1110 hcrx->ccid3hcrx_p = 1000000 / i_mean;
1111 } else {
1112 printk(KERN_CRIT "%s: empty loss hist\n",__FUNCTION__);
1113 dump_stack();
1025 } 1114 }
1026 1115
1027 if (hcrx->ccid3hcrx_p > p_prev) { 1116 if (hcrx->ccid3hcrx_p > p_prev) {
@@ -1230,7 +1319,7 @@ static __exit void ccid3_module_exit(void)
1230} 1319}
1231module_exit(ccid3_module_exit); 1320module_exit(ccid3_module_exit);
1232 1321
1233MODULE_AUTHOR("Ian McDonald <iam4@cs.waikato.ac.nz>, " 1322MODULE_AUTHOR("Ian McDonald <ian.mcdonald@jandi.co.nz>, "
1234 "Arnaldo Carvalho de Melo <acme@ghostprotocols.net>"); 1323 "Arnaldo Carvalho de Melo <acme@ghostprotocols.net>");
1235MODULE_DESCRIPTION("DCCP TFRC CCID3 CCID"); 1324MODULE_DESCRIPTION("DCCP TFRC CCID3 CCID");
1236MODULE_LICENSE("GPL"); 1325MODULE_LICENSE("GPL");
diff --git a/net/dccp/ccids/ccid3.h b/net/dccp/ccids/ccid3.h
index 5ade4f668b2..0a2cb7536d2 100644
--- a/net/dccp/ccids/ccid3.h
+++ b/net/dccp/ccids/ccid3.h
@@ -1,13 +1,13 @@
1/* 1/*
2 * net/dccp/ccids/ccid3.h 2 * net/dccp/ccids/ccid3.h
3 * 3 *
4 * Copyright (c) 2005 The University of Waikato, Hamilton, New Zealand. 4 * Copyright (c) 2005-6 The University of Waikato, Hamilton, New Zealand.
5 * 5 *
6 * An implementation of the DCCP protocol 6 * An implementation of the DCCP protocol
7 * 7 *
8 * This code has been developed by the University of Waikato WAND 8 * This code has been developed by the University of Waikato WAND
9 * research group. For further information please see http://www.wand.net.nz/ 9 * research group. For further information please see http://www.wand.net.nz/
10 * or e-mail Ian McDonald - iam4@cs.waikato.ac.nz 10 * or e-mail Ian McDonald - ian.mcdonald@jandi.co.nz
11 * 11 *
12 * This code also uses code from Lulea University, rereleased as GPL by its 12 * This code also uses code from Lulea University, rereleased as GPL by its
13 * authors: 13 * authors:
@@ -120,9 +120,10 @@ struct ccid3_hc_rx_sock {
120#define ccid3hcrx_x_recv ccid3hcrx_tfrc.tfrcrx_x_recv 120#define ccid3hcrx_x_recv ccid3hcrx_tfrc.tfrcrx_x_recv
121#define ccid3hcrx_rtt ccid3hcrx_tfrc.tfrcrx_rtt 121#define ccid3hcrx_rtt ccid3hcrx_tfrc.tfrcrx_rtt
122#define ccid3hcrx_p ccid3hcrx_tfrc.tfrcrx_p 122#define ccid3hcrx_p ccid3hcrx_tfrc.tfrcrx_p
123 u64 ccid3hcrx_seqno_last_counter:48, 123 u64 ccid3hcrx_seqno_nonloss:48,
124 ccid3hcrx_ccval_nonloss:4,
124 ccid3hcrx_state:8, 125 ccid3hcrx_state:8,
125 ccid3hcrx_last_counter:4; 126 ccid3hcrx_ccval_last_counter:4;
126 u32 ccid3hcrx_bytes_recv; 127 u32 ccid3hcrx_bytes_recv;
127 struct timeval ccid3hcrx_tstamp_last_feedback; 128 struct timeval ccid3hcrx_tstamp_last_feedback;
128 struct timeval ccid3hcrx_tstamp_last_ack; 129 struct timeval ccid3hcrx_tstamp_last_ack;
diff --git a/net/dccp/ccids/lib/loss_interval.c b/net/dccp/ccids/lib/loss_interval.c
index 5d7b7d86438..906c81ab9d4 100644
--- a/net/dccp/ccids/lib/loss_interval.c
+++ b/net/dccp/ccids/lib/loss_interval.c
@@ -2,7 +2,7 @@
2 * net/dccp/ccids/lib/loss_interval.c 2 * net/dccp/ccids/lib/loss_interval.c
3 * 3 *
4 * Copyright (c) 2005 The University of Waikato, Hamilton, New Zealand. 4 * Copyright (c) 2005 The University of Waikato, Hamilton, New Zealand.
5 * Copyright (c) 2005 Ian McDonald <iam4@cs.waikato.ac.nz> 5 * Copyright (c) 2005-6 Ian McDonald <ian.mcdonald@jandi.co.nz>
6 * Copyright (c) 2005 Arnaldo Carvalho de Melo <acme@conectiva.com.br> 6 * Copyright (c) 2005 Arnaldo Carvalho de Melo <acme@conectiva.com.br>
7 * 7 *
8 * This program is free software; you can redistribute it and/or modify 8 * This program is free software; you can redistribute it and/or modify
@@ -12,6 +12,7 @@
12 */ 12 */
13 13
14#include <linux/module.h> 14#include <linux/module.h>
15#include <net/sock.h>
15 16
16#include "loss_interval.h" 17#include "loss_interval.h"
17 18
@@ -90,13 +91,13 @@ u32 dccp_li_hist_calc_i_mean(struct list_head *list)
90 u32 w_tot = 0; 91 u32 w_tot = 0;
91 92
92 list_for_each_entry_safe(li_entry, li_next, list, dccplih_node) { 93 list_for_each_entry_safe(li_entry, li_next, list, dccplih_node) {
93 if (i < DCCP_LI_HIST_IVAL_F_LENGTH) { 94 if (li_entry->dccplih_interval != ~0) {
94 i_tot0 += li_entry->dccplih_interval * dccp_li_hist_w[i]; 95 i_tot0 += li_entry->dccplih_interval * dccp_li_hist_w[i];
95 w_tot += dccp_li_hist_w[i]; 96 w_tot += dccp_li_hist_w[i];
97 if (i != 0)
98 i_tot1 += li_entry->dccplih_interval * dccp_li_hist_w[i - 1];
96 } 99 }
97 100
98 if (i != 0)
99 i_tot1 += li_entry->dccplih_interval * dccp_li_hist_w[i - 1];
100 101
101 if (++i > DCCP_LI_HIST_IVAL_F_LENGTH) 102 if (++i > DCCP_LI_HIST_IVAL_F_LENGTH)
102 break; 103 break;
@@ -107,37 +108,36 @@ u32 dccp_li_hist_calc_i_mean(struct list_head *list)
107 108
108 i_tot = max(i_tot0, i_tot1); 109 i_tot = max(i_tot0, i_tot1);
109 110
110 /* FIXME: Why do we do this? -Ian McDonald */ 111 if (!w_tot) {
111 if (i_tot * 4 < w_tot) 112 LIMIT_NETDEBUG(KERN_WARNING "%s: w_tot = 0\n", __FUNCTION__);
112 i_tot = w_tot * 4; 113 return 1;
114 }
113 115
114 return i_tot * 4 / w_tot; 116 return i_tot / w_tot;
115} 117}
116 118
117EXPORT_SYMBOL_GPL(dccp_li_hist_calc_i_mean); 119EXPORT_SYMBOL_GPL(dccp_li_hist_calc_i_mean);
118 120
119struct dccp_li_hist_entry *dccp_li_hist_interval_new(struct dccp_li_hist *hist, 121int dccp_li_hist_interval_new(struct dccp_li_hist *hist,
120 struct list_head *list, 122 struct list_head *list, const u64 seq_loss, const u8 win_loss)
121 const u64 seq_loss,
122 const u8 win_loss)
123{ 123{
124 struct dccp_li_hist_entry *tail = NULL, *entry; 124 struct dccp_li_hist_entry *entry;
125 int i; 125 int i;
126 126
127 for (i = 0; i <= DCCP_LI_HIST_IVAL_F_LENGTH; ++i) { 127 for (i = 0; i < DCCP_LI_HIST_IVAL_F_LENGTH; i++) {
128 entry = dccp_li_hist_entry_new(hist, SLAB_ATOMIC); 128 entry = dccp_li_hist_entry_new(hist, SLAB_ATOMIC);
129 if (entry == NULL) { 129 if (entry == NULL) {
130 dccp_li_hist_purge(hist, list); 130 dccp_li_hist_purge(hist, list);
131 return NULL; 131 dump_stack();
132 return 0;
132 } 133 }
133 if (tail == NULL) 134 entry->dccplih_interval = ~0;
134 tail = entry;
135 list_add(&entry->dccplih_node, list); 135 list_add(&entry->dccplih_node, list);
136 } 136 }
137 137
138 entry->dccplih_seqno = seq_loss; 138 entry->dccplih_seqno = seq_loss;
139 entry->dccplih_win_count = win_loss; 139 entry->dccplih_win_count = win_loss;
140 return tail; 140 return 1;
141} 141}
142 142
143EXPORT_SYMBOL_GPL(dccp_li_hist_interval_new); 143EXPORT_SYMBOL_GPL(dccp_li_hist_interval_new);
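Note: after the loss_interval.c change, dccp_li_hist_calc_i_mean() skips entries still holding the ~0 placeholder, accumulates i_tot1 only for the entries it actually uses, drops the old *4 scaling, and returns 1 with a warning when w_tot is zero. A standalone sketch of that weighted mean; the weight table and the sample intervals are assumptions for illustration:

/* Sketch of the weighted loss-interval mean as computed after the
 * patch: placeholder entries are skipped and the result is
 * max(i_tot0, i_tot1) / w_tot with no extra scaling. */
#include <stdint.h>
#include <stdio.h>

#define NINTERVALS 8

static const uint32_t w[NINTERVALS] = { 4, 4, 4, 4, 3, 2, 1, 1 };

static uint32_t calc_i_mean(const uint32_t *interval, int n)
{
	uint64_t i_tot0 = 0, i_tot1 = 0;
	uint32_t w_tot = 0;
	int i;

	for (i = 0; i < n && i < NINTERVALS; i++) {
		if (interval[i] == UINT32_MAX)	/* unfilled placeholder */
			continue;
		i_tot0 += (uint64_t)interval[i] * w[i];
		w_tot += w[i];
		if (i != 0)
			i_tot1 += (uint64_t)interval[i] * w[i - 1];
	}
	if (w_tot == 0)
		return 1;	/* the patch warns and returns 1 here */

	return (uint32_t)((i_tot0 > i_tot1 ? i_tot0 : i_tot1) / w_tot);
}

int main(void)
{
	uint32_t samples[NINTERVALS] = {
		100, 120, 90, 110,
		UINT32_MAX, UINT32_MAX, UINT32_MAX, UINT32_MAX
	};

	printf("i_mean = %u\n", (unsigned)calc_i_mean(samples, NINTERVALS));
	return 0;
}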
diff --git a/net/dccp/ccids/lib/loss_interval.h b/net/dccp/ccids/lib/loss_interval.h
index 43bf78269d1..0ae85f0340b 100644
--- a/net/dccp/ccids/lib/loss_interval.h
+++ b/net/dccp/ccids/lib/loss_interval.h
@@ -4,7 +4,7 @@
4 * net/dccp/ccids/lib/loss_interval.h 4 * net/dccp/ccids/lib/loss_interval.h
5 * 5 *
6 * Copyright (c) 2005 The University of Waikato, Hamilton, New Zealand. 6 * Copyright (c) 2005 The University of Waikato, Hamilton, New Zealand.
7 * Copyright (c) 2005 Ian McDonald <iam4@cs.waikato.ac.nz> 7 * Copyright (c) 2005 Ian McDonald <ian.mcdonald@jandi.co.nz>
8 * Copyright (c) 2005 Arnaldo Carvalho de Melo <acme@conectiva.com.br> 8 * Copyright (c) 2005 Arnaldo Carvalho de Melo <acme@conectiva.com.br>
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify it 10 * This program is free software; you can redistribute it and/or modify it
@@ -52,9 +52,6 @@ extern void dccp_li_hist_purge(struct dccp_li_hist *hist,
52 52
53extern u32 dccp_li_hist_calc_i_mean(struct list_head *list); 53extern u32 dccp_li_hist_calc_i_mean(struct list_head *list);
54 54
55extern struct dccp_li_hist_entry * 55extern int dccp_li_hist_interval_new(struct dccp_li_hist *hist,
56 dccp_li_hist_interval_new(struct dccp_li_hist *hist, 56 struct list_head *list, const u64 seq_loss, const u8 win_loss);
57 struct list_head *list,
58 const u64 seq_loss,
59 const u8 win_loss);
60#endif /* _DCCP_LI_HIST_ */ 57#endif /* _DCCP_LI_HIST_ */
diff --git a/net/dccp/ccids/lib/packet_history.c b/net/dccp/ccids/lib/packet_history.c
index ad98d6a322e..b876c9c81c6 100644
--- a/net/dccp/ccids/lib/packet_history.c
+++ b/net/dccp/ccids/lib/packet_history.c
@@ -1,13 +1,13 @@
1/* 1/*
2 * net/dccp/packet_history.h 2 * net/dccp/packet_history.c
3 * 3 *
4 * Copyright (c) 2005 The University of Waikato, Hamilton, New Zealand. 4 * Copyright (c) 2005-6 The University of Waikato, Hamilton, New Zealand.
5 * 5 *
6 * An implementation of the DCCP protocol 6 * An implementation of the DCCP protocol
7 * 7 *
8 * This code has been developed by the University of Waikato WAND 8 * This code has been developed by the University of Waikato WAND
9 * research group. For further information please see http://www.wand.net.nz/ 9 * research group. For further information please see http://www.wand.net.nz/
10 * or e-mail Ian McDonald - iam4@cs.waikato.ac.nz 10 * or e-mail Ian McDonald - ian.mcdonald@jandi.co.nz
11 * 11 *
12 * This code also uses code from Lulea University, rereleased as GPL by its 12 * This code also uses code from Lulea University, rereleased as GPL by its
13 * authors: 13 * authors:
@@ -112,64 +112,27 @@ struct dccp_rx_hist_entry *
112 112
113EXPORT_SYMBOL_GPL(dccp_rx_hist_find_data_packet); 113EXPORT_SYMBOL_GPL(dccp_rx_hist_find_data_packet);
114 114
115int dccp_rx_hist_add_packet(struct dccp_rx_hist *hist, 115void dccp_rx_hist_add_packet(struct dccp_rx_hist *hist,
116 struct list_head *rx_list, 116 struct list_head *rx_list,
117 struct list_head *li_list, 117 struct list_head *li_list,
118 struct dccp_rx_hist_entry *packet) 118 struct dccp_rx_hist_entry *packet,
119 u64 nonloss_seqno)
119{ 120{
120 struct dccp_rx_hist_entry *entry, *next, *iter; 121 struct dccp_rx_hist_entry *entry, *next;
121 u8 num_later = 0; 122 u8 num_later = 0;
122 123
123 iter = dccp_rx_hist_head(rx_list); 124 list_add(&packet->dccphrx_node, rx_list);
124 if (iter == NULL)
125 dccp_rx_hist_add_entry(rx_list, packet);
126 else {
127 const u64 seqno = packet->dccphrx_seqno;
128
129 if (after48(seqno, iter->dccphrx_seqno))
130 dccp_rx_hist_add_entry(rx_list, packet);
131 else {
132 if (dccp_rx_hist_entry_data_packet(iter))
133 num_later = 1;
134
135 list_for_each_entry_continue(iter, rx_list,
136 dccphrx_node) {
137 if (after48(seqno, iter->dccphrx_seqno)) {
138 dccp_rx_hist_add_entry(&iter->dccphrx_node,
139 packet);
140 goto trim_history;
141 }
142
143 if (dccp_rx_hist_entry_data_packet(iter))
144 num_later++;
145 125
146 if (num_later == TFRC_RECV_NUM_LATE_LOSS) {
147 dccp_rx_hist_entry_delete(hist, packet);
148 return 1;
149 }
150 }
151
152 if (num_later < TFRC_RECV_NUM_LATE_LOSS)
153 dccp_rx_hist_add_entry(rx_list, packet);
154 /*
155 * FIXME: else what? should we destroy the packet
156 * like above?
157 */
158 }
159 }
160
161trim_history:
162 /*
163 * Trim history (remove all packets after the NUM_LATE_LOSS + 1
164 * data packets)
165 */
166 num_later = TFRC_RECV_NUM_LATE_LOSS + 1; 126 num_later = TFRC_RECV_NUM_LATE_LOSS + 1;
167 127
168 if (!list_empty(li_list)) { 128 if (!list_empty(li_list)) {
169 list_for_each_entry_safe(entry, next, rx_list, dccphrx_node) { 129 list_for_each_entry_safe(entry, next, rx_list, dccphrx_node) {
170 if (num_later == 0) { 130 if (num_later == 0) {
171 list_del_init(&entry->dccphrx_node); 131 if (after48(nonloss_seqno,
172 dccp_rx_hist_entry_delete(hist, entry); 132 entry->dccphrx_seqno)) {
133 list_del_init(&entry->dccphrx_node);
134 dccp_rx_hist_entry_delete(hist, entry);
135 }
173 } else if (dccp_rx_hist_entry_data_packet(entry)) 136 } else if (dccp_rx_hist_entry_data_packet(entry))
174 --num_later; 137 --num_later;
175 } 138 }
@@ -217,94 +180,10 @@ trim_history:
217 --num_later; 180 --num_later;
218 } 181 }
219 } 182 }
220
221 return 0;
222} 183}
223 184
224EXPORT_SYMBOL_GPL(dccp_rx_hist_add_packet); 185EXPORT_SYMBOL_GPL(dccp_rx_hist_add_packet);
225 186
226u64 dccp_rx_hist_detect_loss(struct list_head *rx_list,
227 struct list_head *li_list, u8 *win_loss)
228{
229 struct dccp_rx_hist_entry *entry, *next, *packet;
230 struct dccp_rx_hist_entry *a_loss = NULL;
231 struct dccp_rx_hist_entry *b_loss = NULL;
232 u64 seq_loss = DCCP_MAX_SEQNO + 1;
233 u8 num_later = TFRC_RECV_NUM_LATE_LOSS;
234
235 list_for_each_entry_safe(entry, next, rx_list, dccphrx_node) {
236 if (num_later == 0) {
237 b_loss = entry;
238 break;
239 } else if (dccp_rx_hist_entry_data_packet(entry))
240 --num_later;
241 }
242
243 if (b_loss == NULL)
244 goto out;
245
246 num_later = 1;
247 list_for_each_entry_safe_continue(entry, next, rx_list, dccphrx_node) {
248 if (num_later == 0) {
249 a_loss = entry;
250 break;
251 } else if (dccp_rx_hist_entry_data_packet(entry))
252 --num_later;
253 }
254
255 if (a_loss == NULL) {
256 if (list_empty(li_list)) {
257 /* no loss event have occured yet */
258 LIMIT_NETDEBUG("%s: TODO: find a lost data packet by "
259 "comparing to initial seqno\n",
260 __FUNCTION__);
261 goto out;
262 } else {
263 LIMIT_NETDEBUG("%s: Less than 4 data pkts in history!",
264 __FUNCTION__);
265 goto out;
266 }
267 }
268
269 /* Locate a lost data packet */
270 entry = packet = b_loss;
271 list_for_each_entry_safe_continue(entry, next, rx_list, dccphrx_node) {
272 u64 delta = dccp_delta_seqno(entry->dccphrx_seqno,
273 packet->dccphrx_seqno);
274
275 if (delta != 0) {
276 if (dccp_rx_hist_entry_data_packet(packet))
277 --delta;
278 /*
279 * FIXME: check this, probably this % usage is because
280 * in earlier drafts the ndp count was just 8 bits
281 * long, but now it cam be up to 24 bits long.
282 */
283#if 0
284 if (delta % DCCP_NDP_LIMIT !=
285 (packet->dccphrx_ndp -
286 entry->dccphrx_ndp) % DCCP_NDP_LIMIT)
287#endif
288 if (delta != packet->dccphrx_ndp - entry->dccphrx_ndp) {
289 seq_loss = entry->dccphrx_seqno;
290 dccp_inc_seqno(&seq_loss);
291 }
292 }
293 packet = entry;
294 if (packet == a_loss)
295 break;
296 }
297out:
298 if (seq_loss != DCCP_MAX_SEQNO + 1)
299 *win_loss = a_loss->dccphrx_ccval;
300 else
301 *win_loss = 0; /* Paranoia */
302
303 return seq_loss;
304}
305
306EXPORT_SYMBOL_GPL(dccp_rx_hist_detect_loss);
307
308struct dccp_tx_hist *dccp_tx_hist_new(const char *name) 187struct dccp_tx_hist *dccp_tx_hist_new(const char *name)
309{ 188{
310 struct dccp_tx_hist *hist = kmalloc(sizeof(*hist), GFP_ATOMIC); 189 struct dccp_tx_hist *hist = kmalloc(sizeof(*hist), GFP_ATOMIC);
@@ -365,6 +244,25 @@ struct dccp_tx_hist_entry *
365 244
366EXPORT_SYMBOL_GPL(dccp_tx_hist_find_entry); 245EXPORT_SYMBOL_GPL(dccp_tx_hist_find_entry);
367 246
247int dccp_rx_hist_find_entry(const struct list_head *list, const u64 seq,
248 u8 *ccval)
249{
250 struct dccp_rx_hist_entry *packet = NULL, *entry;
251
252 list_for_each_entry(entry, list, dccphrx_node)
253 if (entry->dccphrx_seqno == seq) {
254 packet = entry;
255 break;
256 }
257
258 if (packet)
259 *ccval = packet->dccphrx_ccval;
260
261 return packet != NULL;
262}
263
264EXPORT_SYMBOL_GPL(dccp_rx_hist_find_entry);
265
368void dccp_tx_hist_purge_older(struct dccp_tx_hist *hist, 266void dccp_tx_hist_purge_older(struct dccp_tx_hist *hist,
369 struct list_head *list, 267 struct list_head *list,
370 struct dccp_tx_hist_entry *packet) 268 struct dccp_tx_hist_entry *packet)
@@ -391,7 +289,7 @@ void dccp_tx_hist_purge(struct dccp_tx_hist *hist, struct list_head *list)
391 289
392EXPORT_SYMBOL_GPL(dccp_tx_hist_purge); 290EXPORT_SYMBOL_GPL(dccp_tx_hist_purge);
393 291
394MODULE_AUTHOR("Ian McDonald <iam4@cs.waikato.ac.nz>, " 292MODULE_AUTHOR("Ian McDonald <ian.mcdonald@jandi.co.nz>, "
395 "Arnaldo Carvalho de Melo <acme@ghostprotocols.net>"); 293 "Arnaldo Carvalho de Melo <acme@ghostprotocols.net>");
396MODULE_DESCRIPTION("DCCP TFRC library"); 294MODULE_DESCRIPTION("DCCP TFRC library");
397MODULE_LICENSE("GPL"); 295MODULE_LICENSE("GPL");
diff --git a/net/dccp/ccids/lib/packet_history.h b/net/dccp/ccids/lib/packet_history.h
index 673c209e4e8..067cf1c85a3 100644
--- a/net/dccp/ccids/lib/packet_history.h
+++ b/net/dccp/ccids/lib/packet_history.h
@@ -1,13 +1,13 @@
1/* 1/*
2 * net/dccp/packet_history.h 2 * net/dccp/packet_history.h
3 * 3 *
4 * Copyright (c) 2005 The University of Waikato, Hamilton, New Zealand. 4 * Copyright (c) 2005-6 The University of Waikato, Hamilton, New Zealand.
5 * 5 *
6 * An implementation of the DCCP protocol 6 * An implementation of the DCCP protocol
7 * 7 *
8 * This code has been developed by the University of Waikato WAND 8 * This code has been developed by the University of Waikato WAND
9 * research group. For further information please see http://www.wand.net.nz/ 9 * research group. For further information please see http://www.wand.net.nz/
10 * or e-mail Ian McDonald - iam4@cs.waikato.ac.nz 10 * or e-mail Ian McDonald - ian.mcdonald@jandi.co.nz
11 * 11 *
12 * This code also uses code from Lulea University, rereleased as GPL by its 12 * This code also uses code from Lulea University, rereleased as GPL by its
13 * authors: 13 * authors:
@@ -106,6 +106,8 @@ static inline void dccp_tx_hist_entry_delete(struct dccp_tx_hist *hist,
106extern struct dccp_tx_hist_entry * 106extern struct dccp_tx_hist_entry *
107 dccp_tx_hist_find_entry(const struct list_head *list, 107 dccp_tx_hist_find_entry(const struct list_head *list,
108 const u64 seq); 108 const u64 seq);
109extern int dccp_rx_hist_find_entry(const struct list_head *list, const u64 seq,
110 u8 *ccval);
109 111
110static inline void dccp_tx_hist_add_entry(struct list_head *list, 112static inline void dccp_tx_hist_add_entry(struct list_head *list,
111 struct dccp_tx_hist_entry *entry) 113 struct dccp_tx_hist_entry *entry)
@@ -164,12 +166,6 @@ static inline void dccp_rx_hist_entry_delete(struct dccp_rx_hist *hist,
164extern void dccp_rx_hist_purge(struct dccp_rx_hist *hist, 166extern void dccp_rx_hist_purge(struct dccp_rx_hist *hist,
165 struct list_head *list); 167 struct list_head *list);
166 168
167static inline void dccp_rx_hist_add_entry(struct list_head *list,
168 struct dccp_rx_hist_entry *entry)
169{
170 list_add(&entry->dccphrx_node, list);
171}
172
173static inline struct dccp_rx_hist_entry * 169static inline struct dccp_rx_hist_entry *
174 dccp_rx_hist_head(struct list_head *list) 170 dccp_rx_hist_head(struct list_head *list)
175{ 171{
@@ -188,10 +184,11 @@ static inline int
188 entry->dccphrx_type == DCCP_PKT_DATAACK; 184 entry->dccphrx_type == DCCP_PKT_DATAACK;
189} 185}
190 186
191extern int dccp_rx_hist_add_packet(struct dccp_rx_hist *hist, 187extern void dccp_rx_hist_add_packet(struct dccp_rx_hist *hist,
192 struct list_head *rx_list, 188 struct list_head *rx_list,
193 struct list_head *li_list, 189 struct list_head *li_list,
194 struct dccp_rx_hist_entry *packet); 190 struct dccp_rx_hist_entry *packet,
191 u64 nonloss_seqno);
195 192
196extern u64 dccp_rx_hist_detect_loss(struct list_head *rx_list, 193extern u64 dccp_rx_hist_detect_loss(struct list_head *rx_list,
197 struct list_head *li_list, u8 *win_loss); 194 struct list_head *li_list, u8 *win_loss);
diff --git a/net/dccp/ccids/lib/tfrc.h b/net/dccp/ccids/lib/tfrc.h
index 130c4c40cfe..45f30f59ea2 100644
--- a/net/dccp/ccids/lib/tfrc.h
+++ b/net/dccp/ccids/lib/tfrc.h
@@ -4,7 +4,7 @@
4 * net/dccp/ccids/lib/tfrc.h 4 * net/dccp/ccids/lib/tfrc.h
5 * 5 *
6 * Copyright (c) 2005 The University of Waikato, Hamilton, New Zealand. 6 * Copyright (c) 2005 The University of Waikato, Hamilton, New Zealand.
7 * Copyright (c) 2005 Ian McDonald <iam4@cs.waikato.ac.nz> 7 * Copyright (c) 2005 Ian McDonald <ian.mcdonald@jandi.co.nz>
8 * Copyright (c) 2005 Arnaldo Carvalho de Melo <acme@conectiva.com.br> 8 * Copyright (c) 2005 Arnaldo Carvalho de Melo <acme@conectiva.com.br>
9 * Copyright (c) 2003 Nils-Erik Mattsson, Joacim Haggmark, Magnus Erixzon 9 * Copyright (c) 2003 Nils-Erik Mattsson, Joacim Haggmark, Magnus Erixzon
10 * 10 *
diff --git a/net/dccp/ccids/lib/tfrc_equation.c b/net/dccp/ccids/lib/tfrc_equation.c
index 4fd2ebebf5a..44076e0c659 100644
--- a/net/dccp/ccids/lib/tfrc_equation.c
+++ b/net/dccp/ccids/lib/tfrc_equation.c
@@ -2,7 +2,7 @@
2 * net/dccp/ccids/lib/tfrc_equation.c 2 * net/dccp/ccids/lib/tfrc_equation.c
3 * 3 *
4 * Copyright (c) 2005 The University of Waikato, Hamilton, New Zealand. 4 * Copyright (c) 2005 The University of Waikato, Hamilton, New Zealand.
5 * Copyright (c) 2005 Ian McDonald <iam4@cs.waikato.ac.nz> 5 * Copyright (c) 2005 Ian McDonald <ian.mcdonald@jandi.co.nz>
6 * Copyright (c) 2005 Arnaldo Carvalho de Melo <acme@conectiva.com.br> 6 * Copyright (c) 2005 Arnaldo Carvalho de Melo <acme@conectiva.com.br>
7 * Copyright (c) 2003 Nils-Erik Mattsson, Joacim Haggmark, Magnus Erixzon 7 * Copyright (c) 2003 Nils-Erik Mattsson, Joacim Haggmark, Magnus Erixzon
8 * 8 *
diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h
index d00a2f4ee5d..a5c5475724c 100644
--- a/net/dccp/dccp.h
+++ b/net/dccp/dccp.h
@@ -5,7 +5,7 @@
5 * 5 *
6 * An implementation of the DCCP protocol 6 * An implementation of the DCCP protocol
7 * Copyright (c) 2005 Arnaldo Carvalho de Melo <acme@conectiva.com.br> 7 * Copyright (c) 2005 Arnaldo Carvalho de Melo <acme@conectiva.com.br>
8 * Copyright (c) 2005 Ian McDonald <iam4@cs.waikato.ac.nz> 8 * Copyright (c) 2005-6 Ian McDonald <ian.mcdonald@jandi.co.nz>
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify it 10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License version 2 as 11 * under the terms of the GNU General Public License version 2 as
@@ -81,6 +81,14 @@ static inline u64 max48(const u64 seq1, const u64 seq2)
81 return after48(seq1, seq2) ? seq1 : seq2; 81 return after48(seq1, seq2) ? seq1 : seq2;
82} 82}
83 83
84/* is seq1 next seqno after seq2 */
85static inline int follows48(const u64 seq1, const u64 seq2)
86{
87 int diff = (seq1 & 0xFFFF) - (seq2 & 0xFFFF);
88
89 return diff==1;
90}
91
84enum { 92enum {
85 DCCP_MIB_NUM = 0, 93 DCCP_MIB_NUM = 0,
86 DCCP_MIB_ACTIVEOPENS, /* ActiveOpens */ 94 DCCP_MIB_ACTIVEOPENS, /* ActiveOpens */
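Note: the new follows48() helper in dccp.h decides whether seq1 is the immediate successor of seq2; as written it compares only the low 16 bits of the two 48-bit sequence numbers. The sketch below reproduces that check next to a full mod-2^48 variant so the behaviour at the 16-bit wrap point can be seen; both functions and main() are illustrative only:

/* Sketch of the follows48() check: the patched helper compares only the
 * low 16 bits; follows48_full() is an illustrative stricter variant
 * doing the comparison mod 2^48. */
#include <stdint.h>
#include <stdio.h>

static int follows48_low16(uint64_t seq1, uint64_t seq2)
{
	int diff = (int)(seq1 & 0xFFFF) - (int)(seq2 & 0xFFFF);

	return diff == 1;
}

static int follows48_full(uint64_t seq1, uint64_t seq2)
{
	return ((seq2 + 1) & 0xFFFFFFFFFFFFULL) == (seq1 & 0xFFFFFFFFFFFFULL);
}

int main(void)
{
	/* 0x10000 follows 0xFFFF mod 2^48, but the low-16-bit
	 * difference at this wrap point is not 1 */
	printf("low16: %d  full: %d\n",
	       follows48_low16(0x10000, 0xFFFF),
	       follows48_full(0x10000, 0xFFFF));
	return 0;
}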
diff --git a/net/dccp/options.c b/net/dccp/options.c
index daf72bb671f..07a34696ac9 100644
--- a/net/dccp/options.c
+++ b/net/dccp/options.c
@@ -4,7 +4,7 @@
4 * An implementation of the DCCP protocol 4 * An implementation of the DCCP protocol
5 * Copyright (c) 2005 Aristeu Sergio Rozanski Filho <aris@cathedrallabs.org> 5 * Copyright (c) 2005 Aristeu Sergio Rozanski Filho <aris@cathedrallabs.org>
6 * Copyright (c) 2005 Arnaldo Carvalho de Melo <acme@ghostprotocols.net> 6 * Copyright (c) 2005 Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
7 * Copyright (c) 2005 Ian McDonald <iam4@cs.waikato.ac.nz> 7 * Copyright (c) 2005 Ian McDonald <ian.mcdonald@jandi.co.nz>
8 * 8 *
9 * This program is free software; you can redistribute it and/or 9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License 10 * modify it under the terms of the GNU General Public License
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index 9be53a8e72c..51738000f3d 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -159,7 +159,7 @@ void free_fib_info(struct fib_info *fi)
159 159
160void fib_release_info(struct fib_info *fi) 160void fib_release_info(struct fib_info *fi)
161{ 161{
162 write_lock(&fib_info_lock); 162 write_lock_bh(&fib_info_lock);
163 if (fi && --fi->fib_treeref == 0) { 163 if (fi && --fi->fib_treeref == 0) {
164 hlist_del(&fi->fib_hash); 164 hlist_del(&fi->fib_hash);
165 if (fi->fib_prefsrc) 165 if (fi->fib_prefsrc)
@@ -172,7 +172,7 @@ void fib_release_info(struct fib_info *fi)
172 fi->fib_dead = 1; 172 fi->fib_dead = 1;
173 fib_info_put(fi); 173 fib_info_put(fi);
174 } 174 }
175 write_unlock(&fib_info_lock); 175 write_unlock_bh(&fib_info_lock);
176} 176}
177 177
178static __inline__ int nh_comp(const struct fib_info *fi, const struct fib_info *ofi) 178static __inline__ int nh_comp(const struct fib_info *fi, const struct fib_info *ofi)
@@ -598,7 +598,7 @@ static void fib_hash_move(struct hlist_head *new_info_hash,
598 unsigned int old_size = fib_hash_size; 598 unsigned int old_size = fib_hash_size;
599 unsigned int i, bytes; 599 unsigned int i, bytes;
600 600
601 write_lock(&fib_info_lock); 601 write_lock_bh(&fib_info_lock);
602 old_info_hash = fib_info_hash; 602 old_info_hash = fib_info_hash;
603 old_laddrhash = fib_info_laddrhash; 603 old_laddrhash = fib_info_laddrhash;
604 fib_hash_size = new_size; 604 fib_hash_size = new_size;
@@ -639,7 +639,7 @@ static void fib_hash_move(struct hlist_head *new_info_hash,
639 } 639 }
640 fib_info_laddrhash = new_laddrhash; 640 fib_info_laddrhash = new_laddrhash;
641 641
642 write_unlock(&fib_info_lock); 642 write_unlock_bh(&fib_info_lock);
643 643
644 bytes = old_size * sizeof(struct hlist_head *); 644 bytes = old_size * sizeof(struct hlist_head *);
645 fib_hash_free(old_info_hash, bytes); 645 fib_hash_free(old_info_hash, bytes);
@@ -820,7 +820,7 @@ link_it:
820 820
821 fi->fib_treeref++; 821 fi->fib_treeref++;
822 atomic_inc(&fi->fib_clntref); 822 atomic_inc(&fi->fib_clntref);
823 write_lock(&fib_info_lock); 823 write_lock_bh(&fib_info_lock);
824 hlist_add_head(&fi->fib_hash, 824 hlist_add_head(&fi->fib_hash,
825 &fib_info_hash[fib_info_hashfn(fi)]); 825 &fib_info_hash[fib_info_hashfn(fi)]);
826 if (fi->fib_prefsrc) { 826 if (fi->fib_prefsrc) {
@@ -839,7 +839,7 @@ link_it:
839 head = &fib_info_devhash[hash]; 839 head = &fib_info_devhash[hash];
840 hlist_add_head(&nh->nh_hash, head); 840 hlist_add_head(&nh->nh_hash, head);
841 } endfor_nexthops(fi) 841 } endfor_nexthops(fi)
842 write_unlock(&fib_info_lock); 842 write_unlock_bh(&fib_info_lock);
843 return fi; 843 return fi;
844 844
845err_inval: 845err_inval:
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 9f4b752f5a3..8e8117c19e4 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -1793,29 +1793,35 @@ int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
1793 struct in_device *in_dev; 1793 struct in_device *in_dev;
1794 u32 group = imr->imr_multiaddr.s_addr; 1794 u32 group = imr->imr_multiaddr.s_addr;
1795 u32 ifindex; 1795 u32 ifindex;
1796 int ret = -EADDRNOTAVAIL;
1796 1797
1797 rtnl_lock(); 1798 rtnl_lock();
1798 in_dev = ip_mc_find_dev(imr); 1799 in_dev = ip_mc_find_dev(imr);
1799 if (!in_dev) {
1800 rtnl_unlock();
1801 return -ENODEV;
1802 }
1803 ifindex = imr->imr_ifindex; 1800 ifindex = imr->imr_ifindex;
1804 for (imlp = &inet->mc_list; (iml = *imlp) != NULL; imlp = &iml->next) { 1801 for (imlp = &inet->mc_list; (iml = *imlp) != NULL; imlp = &iml->next) {
1805 if (iml->multi.imr_multiaddr.s_addr == group && 1802 if (iml->multi.imr_multiaddr.s_addr != group)
1806 iml->multi.imr_ifindex == ifindex) { 1803 continue;
1807 (void) ip_mc_leave_src(sk, iml, in_dev); 1804 if (ifindex) {
1805 if (iml->multi.imr_ifindex != ifindex)
1806 continue;
1807 } else if (imr->imr_address.s_addr && imr->imr_address.s_addr !=
1808 iml->multi.imr_address.s_addr)
1809 continue;
1810
1811 (void) ip_mc_leave_src(sk, iml, in_dev);
1808 1812
1809 *imlp = iml->next; 1813 *imlp = iml->next;
1810 1814
1815 if (in_dev)
1811 ip_mc_dec_group(in_dev, group); 1816 ip_mc_dec_group(in_dev, group);
1812 rtnl_unlock(); 1817 rtnl_unlock();
1813 sock_kfree_s(sk, iml, sizeof(*iml)); 1818 sock_kfree_s(sk, iml, sizeof(*iml));
1814 return 0; 1819 return 0;
1815 }
1816 } 1820 }
1821 if (!in_dev)
1822 ret = -ENODEV;
1817 rtnl_unlock(); 1823 rtnl_unlock();
1818 return -EADDRNOTAVAIL; 1824 return ret;
1819} 1825}
1820 1826
1821int ip_mc_source(int add, int omode, struct sock *sk, struct 1827int ip_mc_source(int add, int omode, struct sock *sk, struct
@@ -2199,13 +2205,13 @@ void ip_mc_drop_socket(struct sock *sk)
2199 struct in_device *in_dev; 2205 struct in_device *in_dev;
2200 inet->mc_list = iml->next; 2206 inet->mc_list = iml->next;
2201 2207
2202 if ((in_dev = inetdev_by_index(iml->multi.imr_ifindex)) != NULL) { 2208 in_dev = inetdev_by_index(iml->multi.imr_ifindex);
2203 (void) ip_mc_leave_src(sk, iml, in_dev); 2209 (void) ip_mc_leave_src(sk, iml, in_dev);
2210 if (in_dev != NULL) {
2204 ip_mc_dec_group(in_dev, iml->multi.imr_multiaddr.s_addr); 2211 ip_mc_dec_group(in_dev, iml->multi.imr_multiaddr.s_addr);
2205 in_dev_put(in_dev); 2212 in_dev_put(in_dev);
2206 } 2213 }
2207 sock_kfree_s(sk, iml, sizeof(*iml)); 2214 sock_kfree_s(sk, iml, sizeof(*iml));
2208
2209 } 2215 }
2210 rtnl_unlock(); 2216 rtnl_unlock();
2211} 2217}
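The rewritten leave-group loop above matches a membership by group first, then by interface index if the caller supplied one, otherwise by interface address. A userspace sketch of that matching rule (types, field names and values are stand-ins, not kernel structures):

/* Userspace sketch of the membership-matching rule used by the rewritten
 * leave-group loop. Illustrative only. */
#include <stdint.h>
#include <stdio.h>

struct mreq { uint32_t group, ifindex, ifaddr; };   /* request from caller */
struct memb { uint32_t group, ifindex, ifaddr; };   /* stored membership   */

static int membership_matches(const struct memb *m, const struct mreq *r)
{
	if (m->group != r->group)
		return 0;
	if (r->ifindex)                 /* index given: it must match */
		return m->ifindex == r->ifindex;
	if (r->ifaddr)                  /* else fall back to the address */
		return m->ifaddr == r->ifaddr;
	return 1;                       /* wildcard request */
}

int main(void)
{
	struct memb m = { .group = 0xe0000001, .ifindex = 2, .ifaddr = 0x0a000001 };
	struct mreq by_index = { .group = 0xe0000001, .ifindex = 2 };
	struct mreq by_addr  = { .group = 0xe0000001, .ifaddr = 0x0a000001 };
	struct mreq wrong    = { .group = 0xe0000001, .ifindex = 7 };

	printf("by index: %d, by addr: %d, wrong index: %d\n",
	       membership_matches(&m, &by_index),
	       membership_matches(&m, &by_addr),
	       membership_matches(&m, &wrong));
	return 0;
}
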
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 9bf307a2978..4c20f554689 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -947,7 +947,7 @@ alloc_new_skb:
947 skb_prev->csum = csum_sub(skb_prev->csum, 947 skb_prev->csum = csum_sub(skb_prev->csum,
948 skb->csum); 948 skb->csum);
949 data += fraggap; 949 data += fraggap;
950 skb_trim(skb_prev, maxfraglen); 950 pskb_trim_unique(skb_prev, maxfraglen);
951 } 951 }
952 952
953 copy = datalen - transhdrlen - fraggap; 953 copy = datalen - transhdrlen - fraggap;
@@ -1142,7 +1142,7 @@ ssize_t ip_append_page(struct sock *sk, struct page *page,
1142 data, fraggap, 0); 1142 data, fraggap, 0);
1143 skb_prev->csum = csum_sub(skb_prev->csum, 1143 skb_prev->csum = csum_sub(skb_prev->csum,
1144 skb->csum); 1144 skb->csum);
1145 skb_trim(skb_prev, maxfraglen); 1145 pskb_trim_unique(skb_prev, maxfraglen);
1146 } 1146 }
1147 1147
1148 /* 1148 /*
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index 80c73ca9011..8d1d7a6e72a 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -236,7 +236,7 @@ unsigned int arpt_do_table(struct sk_buff **pskb,
236 struct arpt_entry *e, *back; 236 struct arpt_entry *e, *back;
237 const char *indev, *outdev; 237 const char *indev, *outdev;
238 void *table_base; 238 void *table_base;
239 struct xt_table_info *private = table->private; 239 struct xt_table_info *private;
240 240
241 /* ARP header, plus 2 device addresses, plus 2 IP addresses. */ 241 /* ARP header, plus 2 device addresses, plus 2 IP addresses. */
242 if (!pskb_may_pull((*pskb), (sizeof(struct arphdr) + 242 if (!pskb_may_pull((*pskb), (sizeof(struct arphdr) +
@@ -248,6 +248,7 @@ unsigned int arpt_do_table(struct sk_buff **pskb,
248 outdev = out ? out->name : nulldevname; 248 outdev = out ? out->name : nulldevname;
249 249
250 read_lock_bh(&table->lock); 250 read_lock_bh(&table->lock);
251 private = table->private;
251 table_base = (void *)private->entries[smp_processor_id()]; 252 table_base = (void *)private->entries[smp_processor_id()];
252 e = get_entry(table_base, private->hook_entry[hook]); 253 e = get_entry(table_base, private->hook_entry[hook]);
253 back = get_entry(table_base, private->underflow[hook]); 254 back = get_entry(table_base, private->underflow[hook]);
@@ -1170,21 +1171,34 @@ static int __init arp_tables_init(void)
1170{ 1171{
1171 int ret; 1172 int ret;
1172 1173
1173 xt_proto_init(NF_ARP); 1174 ret = xt_proto_init(NF_ARP);
1175 if (ret < 0)
1176 goto err1;
1174 1177
1175 /* Noone else will be downing sem now, so we won't sleep */ 1178 /* Noone else will be downing sem now, so we won't sleep */
1176 xt_register_target(&arpt_standard_target); 1179 ret = xt_register_target(&arpt_standard_target);
1177 xt_register_target(&arpt_error_target); 1180 if (ret < 0)
1181 goto err2;
1182 ret = xt_register_target(&arpt_error_target);
1183 if (ret < 0)
1184 goto err3;
1178 1185
1179 /* Register setsockopt */ 1186 /* Register setsockopt */
1180 ret = nf_register_sockopt(&arpt_sockopts); 1187 ret = nf_register_sockopt(&arpt_sockopts);
1181 if (ret < 0) { 1188 if (ret < 0)
1182 duprintf("Unable to register sockopts.\n"); 1189 goto err4;
1183 return ret;
1184 }
1185 1190
1186 printk("arp_tables: (C) 2002 David S. Miller\n"); 1191 printk("arp_tables: (C) 2002 David S. Miller\n");
1187 return 0; 1192 return 0;
1193
1194err4:
1195 xt_unregister_target(&arpt_error_target);
1196err3:
1197 xt_unregister_target(&arpt_standard_target);
1198err2:
1199 xt_proto_fini(NF_ARP);
1200err1:
1201 return ret;
1188} 1202}
1189 1203
1190static void __exit arp_tables_fini(void) 1204static void __exit arp_tables_fini(void)
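The arp_tables_init() change above converts unchecked registrations into the usual goto-unwind pattern: each step is checked, and on failure the steps that already succeeded are undone in reverse order. A minimal standalone sketch of that pattern (the step functions are stand-ins, not the netfilter API):

/* Minimal sketch of goto-based error unwinding. Illustrative only. */
#include <stdio.h>

static int step_a(void)  { puts("a up");    return 0; }
static int step_b(void)  { puts("b up");    return 0; }
static int step_c(void)  { puts("c fails"); return -1; }
static void undo_a(void) { puts("a down"); }
static void undo_b(void) { puts("b down"); }

static int init_all(void)
{
	int ret;

	ret = step_a();
	if (ret < 0)
		goto err1;
	ret = step_b();
	if (ret < 0)
		goto err2;
	ret = step_c();
	if (ret < 0)
		goto err3;
	return 0;

err3:
	undo_b();
err2:
	undo_a();
err1:
	return ret;
}

int main(void)
{
	printf("init_all() = %d\n", init_all());
	return 0;
}
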
diff --git a/net/ipv4/netfilter/ip_conntrack_netlink.c b/net/ipv4/netfilter/ip_conntrack_netlink.c
index 33891bb1fde..0d4cc92391f 100644
--- a/net/ipv4/netfilter/ip_conntrack_netlink.c
+++ b/net/ipv4/netfilter/ip_conntrack_netlink.c
@@ -415,21 +415,18 @@ ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
415 cb->args[0], *id); 415 cb->args[0], *id);
416 416
417 read_lock_bh(&ip_conntrack_lock); 417 read_lock_bh(&ip_conntrack_lock);
418 last = (struct ip_conntrack *)cb->args[1];
418 for (; cb->args[0] < ip_conntrack_htable_size; cb->args[0]++) { 419 for (; cb->args[0] < ip_conntrack_htable_size; cb->args[0]++) {
419restart: 420restart:
420 last = (struct ip_conntrack *)cb->args[1];
421 list_for_each_prev(i, &ip_conntrack_hash[cb->args[0]]) { 421 list_for_each_prev(i, &ip_conntrack_hash[cb->args[0]]) {
422 h = (struct ip_conntrack_tuple_hash *) i; 422 h = (struct ip_conntrack_tuple_hash *) i;
423 if (DIRECTION(h) != IP_CT_DIR_ORIGINAL) 423 if (DIRECTION(h) != IP_CT_DIR_ORIGINAL)
424 continue; 424 continue;
425 ct = tuplehash_to_ctrack(h); 425 ct = tuplehash_to_ctrack(h);
426 if (last != NULL) { 426 if (cb->args[1]) {
427 if (ct == last) { 427 if (ct != last)
428 ip_conntrack_put(last);
429 cb->args[1] = 0;
430 last = NULL;
431 } else
432 continue; 428 continue;
429 cb->args[1] = 0;
433 } 430 }
434 if (ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).pid, 431 if (ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).pid,
435 cb->nlh->nlmsg_seq, 432 cb->nlh->nlmsg_seq,
@@ -440,17 +437,17 @@ restart:
440 goto out; 437 goto out;
441 } 438 }
442 } 439 }
443 if (last != NULL) { 440 if (cb->args[1]) {
444 ip_conntrack_put(last);
445 cb->args[1] = 0; 441 cb->args[1] = 0;
446 goto restart; 442 goto restart;
447 } 443 }
448 } 444 }
449out: 445out:
450 read_unlock_bh(&ip_conntrack_lock); 446 read_unlock_bh(&ip_conntrack_lock);
447 if (last)
448 ip_conntrack_put(last);
451 449
452 DEBUGP("leaving, last bucket=%lu id=%u\n", cb->args[0], *id); 450 DEBUGP("leaving, last bucket=%lu id=%u\n", cb->args[0], *id);
453
454 return skb->len; 451 return skb->len;
455} 452}
456 453
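The ctnetlink dump change above keeps the resume cursor in cb->args[1] and drops the reference on the previously dumped entry only after the lock is released. The underlying idea is a chunked dump with a cursor saved between calls; a much-simplified userspace sketch (no locking or refcounting, cursor held in a plain struct rather than cb->args[]):

/* Standalone sketch of a chunked dump with a resume cursor. Illustrative only. */
#include <stdio.h>

struct dump_state { int next; };   /* stands in for cb->args[] */

/* Emit up to 'budget' items; return how many were emitted. */
static int dump_chunk(struct dump_state *st, const int *items, int n, int budget)
{
	int emitted = 0;

	while (st->next < n && emitted < budget) {
		printf("item %d\n", items[st->next]);
		st->next++;
		emitted++;
	}
	return emitted;
}

int main(void)
{
	const int items[] = { 10, 11, 12, 13, 14 };
	struct dump_state st = { 0 };

	/* The caller keeps invoking until a chunk comes back empty. */
	while (dump_chunk(&st, items, 5, 2) > 0)
		puts("-- end of one chunk --");
	return 0;
}
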
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index fc5bdd5eb7d..048514f15f2 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -230,7 +230,7 @@ ipt_do_table(struct sk_buff **pskb,
230 const char *indev, *outdev; 230 const char *indev, *outdev;
231 void *table_base; 231 void *table_base;
232 struct ipt_entry *e, *back; 232 struct ipt_entry *e, *back;
233 struct xt_table_info *private = table->private; 233 struct xt_table_info *private;
234 234
235 /* Initialization */ 235 /* Initialization */
236 ip = (*pskb)->nh.iph; 236 ip = (*pskb)->nh.iph;
@@ -247,6 +247,7 @@ ipt_do_table(struct sk_buff **pskb,
247 247
248 read_lock_bh(&table->lock); 248 read_lock_bh(&table->lock);
249 IP_NF_ASSERT(table->valid_hooks & (1 << hook)); 249 IP_NF_ASSERT(table->valid_hooks & (1 << hook));
250 private = table->private;
250 table_base = (void *)private->entries[smp_processor_id()]; 251 table_base = (void *)private->entries[smp_processor_id()];
251 e = get_entry(table_base, private->hook_entry[hook]); 252 e = get_entry(table_base, private->hook_entry[hook]);
252 253
@@ -2239,22 +2240,39 @@ static int __init ip_tables_init(void)
2239{ 2240{
2240 int ret; 2241 int ret;
2241 2242
2242 xt_proto_init(AF_INET); 2243 ret = xt_proto_init(AF_INET);
2244 if (ret < 0)
2245 goto err1;
2243 2246
2244 /* Noone else will be downing sem now, so we won't sleep */ 2247 /* Noone else will be downing sem now, so we won't sleep */
2245 xt_register_target(&ipt_standard_target); 2248 ret = xt_register_target(&ipt_standard_target);
2246 xt_register_target(&ipt_error_target); 2249 if (ret < 0)
2247 xt_register_match(&icmp_matchstruct); 2250 goto err2;
2251 ret = xt_register_target(&ipt_error_target);
2252 if (ret < 0)
2253 goto err3;
2254 ret = xt_register_match(&icmp_matchstruct);
2255 if (ret < 0)
2256 goto err4;
2248 2257
2249 /* Register setsockopt */ 2258 /* Register setsockopt */
2250 ret = nf_register_sockopt(&ipt_sockopts); 2259 ret = nf_register_sockopt(&ipt_sockopts);
2251 if (ret < 0) { 2260 if (ret < 0)
2252 duprintf("Unable to register sockopts.\n"); 2261 goto err5;
2253 return ret;
2254 }
2255 2262
2256 printk("ip_tables: (C) 2000-2006 Netfilter Core Team\n"); 2263 printk("ip_tables: (C) 2000-2006 Netfilter Core Team\n");
2257 return 0; 2264 return 0;
2265
2266err5:
2267 xt_unregister_match(&icmp_matchstruct);
2268err4:
2269 xt_unregister_target(&ipt_error_target);
2270err3:
2271 xt_unregister_target(&ipt_standard_target);
2272err2:
2273 xt_proto_fini(AF_INET);
2274err1:
2275 return ret;
2258} 2276}
2259 2277
2260static void __exit ip_tables_fini(void) 2278static void __exit ip_tables_fini(void)
diff --git a/net/ipv4/netfilter/ipt_ULOG.c b/net/ipv4/netfilter/ipt_ULOG.c
index d7dd7fe7051..d46fd677fa1 100644
--- a/net/ipv4/netfilter/ipt_ULOG.c
+++ b/net/ipv4/netfilter/ipt_ULOG.c
@@ -115,6 +115,11 @@ static void ulog_send(unsigned int nlgroupnum)
115 del_timer(&ub->timer); 115 del_timer(&ub->timer);
116 } 116 }
117 117
118 if (!ub->skb) {
119 DEBUGP("ipt_ULOG: ulog_send: nothing to send\n");
120 return;
121 }
122
118 /* last nlmsg needs NLMSG_DONE */ 123 /* last nlmsg needs NLMSG_DONE */
119 if (ub->qlen > 1) 124 if (ub->qlen > 1)
120 ub->lastnlh->nlmsg_type = NLMSG_DONE; 125 ub->lastnlh->nlmsg_type = NLMSG_DONE;
diff --git a/net/ipv4/netfilter/ipt_hashlimit.c b/net/ipv4/netfilter/ipt_hashlimit.c
index 6b662449e82..3bd2368e1fc 100644
--- a/net/ipv4/netfilter/ipt_hashlimit.c
+++ b/net/ipv4/netfilter/ipt_hashlimit.c
@@ -454,15 +454,12 @@ hashlimit_match(const struct sk_buff *skb,
454 dh->rateinfo.credit_cap = user2credits(hinfo->cfg.avg * 454 dh->rateinfo.credit_cap = user2credits(hinfo->cfg.avg *
455 hinfo->cfg.burst); 455 hinfo->cfg.burst);
456 dh->rateinfo.cost = user2credits(hinfo->cfg.avg); 456 dh->rateinfo.cost = user2credits(hinfo->cfg.avg);
457 457 } else {
458 spin_unlock_bh(&hinfo->lock); 458 /* update expiration timeout */
459 return 1; 459 dh->expires = now + msecs_to_jiffies(hinfo->cfg.expire);
460 rateinfo_recalc(dh, now);
460 } 461 }
461 462
462 /* update expiration timeout */
463 dh->expires = now + msecs_to_jiffies(hinfo->cfg.expire);
464
465 rateinfo_recalc(dh, now);
466 if (dh->rateinfo.credit >= dh->rateinfo.cost) { 463 if (dh->rateinfo.credit >= dh->rateinfo.cost) {
467 /* We're underlimit. */ 464 /* We're underlimit. */
468 dh->rateinfo.credit -= dh->rateinfo.cost; 465 dh->rateinfo.credit -= dh->rateinfo.cost;
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 19bd49d69d9..b873cbcdd0b 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -3157,7 +3157,7 @@ int __init ip_rt_init(void)
3157 rhash_entries, 3157 rhash_entries,
3158 (num_physpages >= 128 * 1024) ? 3158 (num_physpages >= 128 * 1024) ?
3159 15 : 17, 3159 15 : 17,
3160 HASH_HIGHMEM, 3160 0,
3161 &rt_hash_log, 3161 &rt_hash_log,
3162 &rt_hash_mask, 3162 &rt_hash_mask,
3163 0); 3163 0);
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index 5765f9d0317..7ff2e4273a7 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -189,7 +189,7 @@ void tcp_slow_start(struct tcp_sock *tp)
189 return; 189 return;
190 190
191 /* We MAY increase by 2 if discovered delayed ack */ 191 /* We MAY increase by 2 if discovered delayed ack */
192 if (sysctl_tcp_abc > 1 && tp->bytes_acked > 2*tp->mss_cache) { 192 if (sysctl_tcp_abc > 1 && tp->bytes_acked >= 2*tp->mss_cache) {
193 if (tp->snd_cwnd < tp->snd_cwnd_clamp) 193 if (tp->snd_cwnd < tp->snd_cwnd_clamp)
194 tp->snd_cwnd++; 194 tp->snd_cwnd++;
195 } 195 }
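The one-character change above moves a boundary: with ABC enabled, an ACK covering exactly two full-sized segments should now count as evidence of a delayed ACK. A tiny standalone check of that boundary (the MSS value is illustrative):

/* Boundary check for the ">" -> ">=" change. Illustrative values only. */
#include <stdio.h>

int main(void)
{
	unsigned int mss = 1460;
	unsigned int bytes_acked = 2 * mss;   /* exactly two segments */

	printf("old test (>):  %d\n", bytes_acked >  2 * mss);  /* 0: missed  */
	printf("new test (>=): %d\n", bytes_acked >= 2 * mss);  /* 1: counted */
	return 0;
}
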
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 104af5d5bcb..111ff39a08c 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2505,8 +2505,13 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
2505 if (before(ack, prior_snd_una)) 2505 if (before(ack, prior_snd_una))
2506 goto old_ack; 2506 goto old_ack;
2507 2507
2508 if (sysctl_tcp_abc && icsk->icsk_ca_state < TCP_CA_CWR) 2508 if (sysctl_tcp_abc) {
2509 tp->bytes_acked += ack - prior_snd_una; 2509 if (icsk->icsk_ca_state < TCP_CA_CWR)
2510 tp->bytes_acked += ack - prior_snd_una;
2511 else if (icsk->icsk_ca_state == TCP_CA_Loss)
2512 /* we assume just one segment left network */
2513 tp->bytes_acked += min(ack - prior_snd_una, tp->mss_cache);
2514 }
2510 2515
2511 if (!(flag&FLAG_SLOWPATH) && after(ack, prior_snd_una)) { 2516 if (!(flag&FLAG_SLOWPATH) && after(ack, prior_snd_una)) {
2512 /* Window is constant, pure forward advance. 2517 /* Window is constant, pure forward advance.
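The tcp_ack() hunk above changes how ABC credits newly acked bytes: the full range below CWR, at most one MSS per ACK while in loss recovery, and nothing otherwise. A standalone sketch of that rule (the state set here is a reduced, illustrative subset of the kernel's congestion-avoidance states):

/* Sketch of per-ACK byte crediting under ABC. Illustrative only. */
#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

enum ca_state { CA_OPEN, CA_CWR, CA_LOSS };   /* illustrative subset */

static unsigned long abc_credit(enum ca_state state,
				unsigned long newly_acked,
				unsigned long mss)
{
	if (state < CA_CWR)
		return newly_acked;             /* normal case: full credit       */
	if (state == CA_LOSS)
		return MIN(newly_acked, mss);   /* assume one segment left network */
	return 0;                               /* CWR/recovery: no credit        */
}

int main(void)
{
	printf("open: %lu, loss: %lu, cwr: %lu\n",
	       abc_credit(CA_OPEN, 4380, 1460),
	       abc_credit(CA_LOSS, 4380, 1460),
	       abc_credit(CA_CWR, 4380, 1460));
	return 0;
}
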
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 5c08ea20a18..b4f3ffe1b3b 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -201,6 +201,7 @@ void tcp_select_initial_window(int __space, __u32 mss,
201 * See RFC1323 for an explanation of the limit to 14 201 * See RFC1323 for an explanation of the limit to 14
202 */ 202 */
203 space = max_t(u32, sysctl_tcp_rmem[2], sysctl_rmem_max); 203 space = max_t(u32, sysctl_tcp_rmem[2], sysctl_rmem_max);
204 space = min_t(u32, space, *window_clamp);
204 while (space > 65535 && (*rcv_wscale) < 14) { 205 while (space > 65535 && (*rcv_wscale) < 14) {
205 space >>= 1; 206 space >>= 1;
206 (*rcv_wscale)++; 207 (*rcv_wscale)++;
@@ -466,7 +467,8 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
466 if (skb->len != tcp_header_size) 467 if (skb->len != tcp_header_size)
467 tcp_event_data_sent(tp, skb, sk); 468 tcp_event_data_sent(tp, skb, sk);
468 469
469 TCP_INC_STATS(TCP_MIB_OUTSEGS); 470 if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq)
471 TCP_INC_STATS(TCP_MIB_OUTSEGS);
470 472
471 err = icsk->icsk_af_ops->queue_xmit(skb, 0); 473 err = icsk->icsk_af_ops->queue_xmit(skb, 0);
472 if (likely(err <= 0)) 474 if (likely(err <= 0))
@@ -2157,10 +2159,9 @@ int tcp_connect(struct sock *sk)
2157 skb_shinfo(buff)->gso_size = 0; 2159 skb_shinfo(buff)->gso_size = 0;
2158 skb_shinfo(buff)->gso_type = 0; 2160 skb_shinfo(buff)->gso_type = 0;
2159 buff->csum = 0; 2161 buff->csum = 0;
2162 tp->snd_nxt = tp->write_seq;
2160 TCP_SKB_CB(buff)->seq = tp->write_seq++; 2163 TCP_SKB_CB(buff)->seq = tp->write_seq++;
2161 TCP_SKB_CB(buff)->end_seq = tp->write_seq; 2164 TCP_SKB_CB(buff)->end_seq = tp->write_seq;
2162 tp->snd_nxt = tp->write_seq;
2163 tp->pushed_seq = tp->write_seq;
2164 2165
2165 /* Send it off. */ 2166 /* Send it off. */
2166 TCP_SKB_CB(buff)->when = tcp_time_stamp; 2167 TCP_SKB_CB(buff)->when = tcp_time_stamp;
@@ -2170,6 +2171,12 @@ int tcp_connect(struct sock *sk)
2170 sk_charge_skb(sk, buff); 2171 sk_charge_skb(sk, buff);
2171 tp->packets_out += tcp_skb_pcount(buff); 2172 tp->packets_out += tcp_skb_pcount(buff);
2172 tcp_transmit_skb(sk, buff, 1, GFP_KERNEL); 2173 tcp_transmit_skb(sk, buff, 1, GFP_KERNEL);
2174
2175 /* We change tp->snd_nxt after the tcp_transmit_skb() call
2176 * in order to make this packet get counted in tcpOutSegs.
2177 */
2178 tp->snd_nxt = tp->write_seq;
2179 tp->pushed_seq = tp->write_seq;
2173 TCP_INC_STATS(TCP_MIB_ACTIVEOPENS); 2180 TCP_INC_STATS(TCP_MIB_ACTIVEOPENS);
2174 2181
2175 /* Timer for repeating the SYN until an answer. */ 2182 /* Timer for repeating the SYN until an answer. */
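The first tcp_output.c hunk above clamps the candidate receive space to the window clamp before deriving the window-scale shift, so the advertised scale is never larger than the clamp requires. A standalone sketch of that selection (values are illustrative, not sysctl defaults):

/* Sketch of receive window-scale selection with the added clamp. */
#include <stdint.h>
#include <stdio.h>

static unsigned int pick_rcv_wscale(uint32_t space, uint32_t window_clamp)
{
	unsigned int wscale = 0;

	if (space > window_clamp)     /* the added clamp */
		space = window_clamp;

	/* raise the shift until the space fits the 16-bit window field */
	while (space > 65535 && wscale < 14) {
		space >>= 1;
		wscale++;
	}
	return wscale;
}

int main(void)
{
	/* Without the clamp, a huge tunable-derived space picks a far larger
	 * scale than a 128 KiB clamp ever needs. */
	printf("clamped:   wscale = %u\n", pick_rcv_wscale(4u << 20, 128u << 10));
	printf("unclamped: wscale = %u\n", pick_rcv_wscale(4u << 20, ~0u));
	return 0;
}
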
diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
index b3435324b57..dab37d2f65f 100644
--- a/net/ipv4/tcp_probe.c
+++ b/net/ipv4/tcp_probe.c
@@ -130,11 +130,12 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
130 error = wait_event_interruptible(tcpw.wait, 130 error = wait_event_interruptible(tcpw.wait,
131 __kfifo_len(tcpw.fifo) != 0); 131 __kfifo_len(tcpw.fifo) != 0);
132 if (error) 132 if (error)
133 return error; 133 goto out_free;
134 134
135 cnt = kfifo_get(tcpw.fifo, tbuf, len); 135 cnt = kfifo_get(tcpw.fifo, tbuf, len);
136 error = copy_to_user(buf, tbuf, cnt); 136 error = copy_to_user(buf, tbuf, cnt);
137 137
138out_free:
138 vfree(tbuf); 139 vfree(tbuf);
139 140
140 return error ? error : cnt; 141 return error ? error : cnt;
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 8ea1e36bf8e..c7852b38e03 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -578,6 +578,8 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr, int pfxlen,
578 ifa->flags = flags | IFA_F_TENTATIVE; 578 ifa->flags = flags | IFA_F_TENTATIVE;
579 ifa->cstamp = ifa->tstamp = jiffies; 579 ifa->cstamp = ifa->tstamp = jiffies;
580 580
581 ifa->rt = rt;
582
581 ifa->idev = idev; 583 ifa->idev = idev;
582 in6_dev_hold(idev); 584 in6_dev_hold(idev);
583 /* For caller */ 585 /* For caller */
@@ -603,8 +605,6 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr, int pfxlen,
603 } 605 }
604#endif 606#endif
605 607
606 ifa->rt = rt;
607
608 in6_ifa_hold(ifa); 608 in6_ifa_hold(ifa);
609 write_unlock(&idev->lock); 609 write_unlock(&idev->lock);
610out2: 610out2:
@@ -1909,11 +1909,11 @@ static int inet6_addr_add(int ifindex, struct in6_addr *pfx, int plen,
1909 ifp = ipv6_add_addr(idev, pfx, plen, scope, ifa_flags); 1909 ifp = ipv6_add_addr(idev, pfx, plen, scope, ifa_flags);
1910 1910
1911 if (!IS_ERR(ifp)) { 1911 if (!IS_ERR(ifp)) {
1912 spin_lock(&ifp->lock); 1912 spin_lock_bh(&ifp->lock);
1913 ifp->valid_lft = valid_lft; 1913 ifp->valid_lft = valid_lft;
1914 ifp->prefered_lft = prefered_lft; 1914 ifp->prefered_lft = prefered_lft;
1915 ifp->tstamp = jiffies; 1915 ifp->tstamp = jiffies;
1916 spin_unlock(&ifp->lock); 1916 spin_unlock_bh(&ifp->lock);
1917 1917
1918 addrconf_dad_start(ifp, 0); 1918 addrconf_dad_start(ifp, 0);
1919 in6_ifa_put(ifp); 1919 in6_ifa_put(ifp);
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index 1044b6fce0d..3d6e9a35115 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -712,6 +712,11 @@ discard_it:
712 return 0; 712 return 0;
713} 713}
714 714
715/*
716 * Special lock-class for __icmpv6_socket:
717 */
718static struct lock_class_key icmpv6_socket_sk_dst_lock_key;
719
715int __init icmpv6_init(struct net_proto_family *ops) 720int __init icmpv6_init(struct net_proto_family *ops)
716{ 721{
717 struct sock *sk; 722 struct sock *sk;
@@ -730,6 +735,14 @@ int __init icmpv6_init(struct net_proto_family *ops)
730 735
731 sk = per_cpu(__icmpv6_socket, i)->sk; 736 sk = per_cpu(__icmpv6_socket, i)->sk;
732 sk->sk_allocation = GFP_ATOMIC; 737 sk->sk_allocation = GFP_ATOMIC;
738 /*
739 * Split off their lock-class, because sk->sk_dst_lock
740 * gets used from softirqs, which is safe for
741 * __icmpv6_socket (because those never get directly used
742 * via userspace syscalls), but unsafe for normal sockets.
743 */
744 lockdep_set_class(&sk->sk_dst_lock,
745 &icmpv6_socket_sk_dst_lock_key);
733 746
734 /* Enough space for 2 64K ICMP packets, including 747 /* Enough space for 2 64K ICMP packets, including
735 * sk_buff struct overhead. 748 * sk_buff struct overhead.
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 69451af6abe..4fb47a25291 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -1095,7 +1095,7 @@ alloc_new_skb:
1095 skb_prev->csum = csum_sub(skb_prev->csum, 1095 skb_prev->csum = csum_sub(skb_prev->csum,
1096 skb->csum); 1096 skb->csum);
1097 data += fraggap; 1097 data += fraggap;
1098 skb_trim(skb_prev, maxfraglen); 1098 pskb_trim_unique(skb_prev, maxfraglen);
1099 } 1099 }
1100 copy = datalen - transhdrlen - fraggap; 1100 copy = datalen - transhdrlen - fraggap;
1101 if (copy < 0) { 1101 if (copy < 0) {
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 9d697d4dcff..639eb20c9f1 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -268,13 +268,14 @@ int ipv6_sock_mc_drop(struct sock *sk, int ifindex, struct in6_addr *addr)
268 if ((dev = dev_get_by_index(mc_lst->ifindex)) != NULL) { 268 if ((dev = dev_get_by_index(mc_lst->ifindex)) != NULL) {
269 struct inet6_dev *idev = in6_dev_get(dev); 269 struct inet6_dev *idev = in6_dev_get(dev);
270 270
271 (void) ip6_mc_leave_src(sk, mc_lst, idev);
271 if (idev) { 272 if (idev) {
272 (void) ip6_mc_leave_src(sk,mc_lst,idev);
273 __ipv6_dev_mc_dec(idev, &mc_lst->addr); 273 __ipv6_dev_mc_dec(idev, &mc_lst->addr);
274 in6_dev_put(idev); 274 in6_dev_put(idev);
275 } 275 }
276 dev_put(dev); 276 dev_put(dev);
277 } 277 } else
278 (void) ip6_mc_leave_src(sk, mc_lst, NULL);
278 sock_kfree_s(sk, mc_lst, sizeof(*mc_lst)); 279 sock_kfree_s(sk, mc_lst, sizeof(*mc_lst));
279 return 0; 280 return 0;
280 } 281 }
@@ -334,13 +335,14 @@ void ipv6_sock_mc_close(struct sock *sk)
334 if (dev) { 335 if (dev) {
335 struct inet6_dev *idev = in6_dev_get(dev); 336 struct inet6_dev *idev = in6_dev_get(dev);
336 337
338 (void) ip6_mc_leave_src(sk, mc_lst, idev);
337 if (idev) { 339 if (idev) {
338 (void) ip6_mc_leave_src(sk, mc_lst, idev);
339 __ipv6_dev_mc_dec(idev, &mc_lst->addr); 340 __ipv6_dev_mc_dec(idev, &mc_lst->addr);
340 in6_dev_put(idev); 341 in6_dev_put(idev);
341 } 342 }
342 dev_put(dev); 343 dev_put(dev);
343 } 344 } else
345 (void) ip6_mc_leave_src(sk, mc_lst, NULL);
344 346
345 sock_kfree_s(sk, mc_lst, sizeof(*mc_lst)); 347 sock_kfree_s(sk, mc_lst, sizeof(*mc_lst));
346 348
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index f26898b0034..c9d6b23cd3f 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -1398,23 +1398,39 @@ static int __init ip6_tables_init(void)
1398{ 1398{
1399 int ret; 1399 int ret;
1400 1400
1401 xt_proto_init(AF_INET6); 1401 ret = xt_proto_init(AF_INET6);
1402 if (ret < 0)
1403 goto err1;
1402 1404
1403 /* Noone else will be downing sem now, so we won't sleep */ 1405 /* Noone else will be downing sem now, so we won't sleep */
1404 xt_register_target(&ip6t_standard_target); 1406 ret = xt_register_target(&ip6t_standard_target);
1405 xt_register_target(&ip6t_error_target); 1407 if (ret < 0)
1406 xt_register_match(&icmp6_matchstruct); 1408 goto err2;
1409 ret = xt_register_target(&ip6t_error_target);
1410 if (ret < 0)
1411 goto err3;
1412 ret = xt_register_match(&icmp6_matchstruct);
1413 if (ret < 0)
1414 goto err4;
1407 1415
1408 /* Register setsockopt */ 1416 /* Register setsockopt */
1409 ret = nf_register_sockopt(&ip6t_sockopts); 1417 ret = nf_register_sockopt(&ip6t_sockopts);
1410 if (ret < 0) { 1418 if (ret < 0)
1411 duprintf("Unable to register sockopts.\n"); 1419 goto err5;
1412 xt_proto_fini(AF_INET6);
1413 return ret;
1414 }
1415 1420
1416 printk("ip6_tables: (C) 2000-2006 Netfilter Core Team\n"); 1421 printk("ip6_tables: (C) 2000-2006 Netfilter Core Team\n");
1417 return 0; 1422 return 0;
1423
1424err5:
1425 xt_unregister_match(&icmp6_matchstruct);
1426err4:
1427 xt_unregister_target(&ip6t_error_target);
1428err3:
1429 xt_unregister_target(&ip6t_standard_target);
1430err2:
1431 xt_proto_fini(AF_INET6);
1432err1:
1433 return ret;
1418} 1434}
1419 1435
1420static void __exit ip6_tables_fini(void) 1436static void __exit ip6_tables_fini(void)
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 4b163711f3a..d9baca062d2 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -1532,6 +1532,10 @@ int ipv6_route_ioctl(unsigned int cmd, void __user *arg)
1532 1532
1533static int ip6_pkt_discard(struct sk_buff *skb) 1533static int ip6_pkt_discard(struct sk_buff *skb)
1534{ 1534{
1535 int type = ipv6_addr_type(&skb->nh.ipv6h->daddr);
1536 if (type == IPV6_ADDR_ANY || type == IPV6_ADDR_RESERVED)
1537 IP6_INC_STATS(IPSTATS_MIB_INADDRERRORS);
1538
1535 IP6_INC_STATS(IPSTATS_MIB_OUTNOROUTES); 1539 IP6_INC_STATS(IPSTATS_MIB_OUTNOROUTES);
1536 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_NOROUTE, 0, skb->dev); 1540 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_NOROUTE, 0, skb->dev);
1537 kfree_skb(skb); 1541 kfree_skb(skb);
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index b843a650be7..802a1a6b103 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -944,7 +944,7 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
944 * comment in that function for the gory details. -acme 944 * comment in that function for the gory details. -acme
945 */ 945 */
946 946
947 sk->sk_gso_type = SKB_GSO_TCPV6; 947 newsk->sk_gso_type = SKB_GSO_TCPV6;
948 __ip6_dst_store(newsk, dst, NULL); 948 __ip6_dst_store(newsk, dst, NULL);
949 949
950 newtcp6sk = (struct tcp6_sock *)newsk; 950 newtcp6sk = (struct tcp6_sock *)newsk;
diff --git a/net/ipx/af_ipx.c b/net/ipx/af_ipx.c
index aa34ff4b707..bef3f61569f 100644
--- a/net/ipx/af_ipx.c
+++ b/net/ipx/af_ipx.c
@@ -1642,13 +1642,17 @@ static int ipx_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_ty
1642 if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) 1642 if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
1643 goto out; 1643 goto out;
1644 1644
1645 ipx = ipx_hdr(skb); 1645 if (!pskb_may_pull(skb, sizeof(struct ipxhdr)))
1646 ipx_pktsize = ntohs(ipx->ipx_pktsize); 1646 goto drop;
1647
1648 ipx_pktsize = ntohs(ipx_hdr(skb)->ipx_pktsize);
1647 1649
1648 /* Too small or invalid header? */ 1650 /* Too small or invalid header? */
1649 if (ipx_pktsize < sizeof(struct ipxhdr) || ipx_pktsize > skb->len) 1651 if (ipx_pktsize < sizeof(struct ipxhdr) ||
1652 !pskb_may_pull(skb, ipx_pktsize))
1650 goto drop; 1653 goto drop;
1651 1654
1655 ipx = ipx_hdr(skb);
1652 if (ipx->ipx_checksum != IPX_NO_CHECKSUM && 1656 if (ipx->ipx_checksum != IPX_NO_CHECKSUM &&
1653 ipx->ipx_checksum != ipx_cksum(ipx, ipx_pktsize)) 1657 ipx->ipx_checksum != ipx_cksum(ipx, ipx_pktsize))
1654 goto drop; 1658 goto drop;
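The ipx_rcv() fix above establishes an ordering: confirm the fixed-size header is actually present before reading it, then confirm the length it claims is also present before trusting any further fields. A standalone sketch of that validation order (hypothetical header layout; assumes a little-endian host for the sample buffer):

/* Sketch of header-then-length validation before parsing. Illustrative only. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct pkt_hdr {
	uint16_t pktsize;    /* claimed total length, including this header */
	uint16_t checksum;
};

static int parse_packet(const uint8_t *buf, size_t len)
{
	struct pkt_hdr hdr;

	if (len < sizeof(hdr))               /* header itself present? */
		return -1;
	memcpy(&hdr, buf, sizeof(hdr));

	if (hdr.pktsize < sizeof(hdr) ||     /* sane claimed size...   */
	    hdr.pktsize > len)               /* ...and fully present?  */
		return -1;

	/* only now is it safe to look at the rest of the packet */
	return 0;
}

int main(void)
{
	uint8_t short_buf[2] = { 0 };
	uint8_t ok_buf[8] = { 8, 0, 0, 0 };   /* pktsize = 8 (little-endian) */

	printf("truncated: %d\n", parse_packet(short_buf, sizeof(short_buf)));
	printf("complete:  %d\n", parse_packet(ok_buf, sizeof(ok_buf)));
	return 0;
}
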
diff --git a/net/llc/llc_sap.c b/net/llc/llc_sap.c
index 42eb0c3a978..61cb8cf7d15 100644
--- a/net/llc/llc_sap.c
+++ b/net/llc/llc_sap.c
@@ -330,6 +330,9 @@ static void llc_sap_mcast(struct llc_sap *sap,
330 if (llc->laddr.lsap != laddr->lsap) 330 if (llc->laddr.lsap != laddr->lsap)
331 continue; 331 continue;
332 332
333 if (llc->dev != skb->dev)
334 continue;
335
333 skb1 = skb_clone(skb, GFP_ATOMIC); 336 skb1 = skb_clone(skb, GFP_ATOMIC);
334 if (!skb1) 337 if (!skb1)
335 break; 338 break;
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index af4845971f7..6527d4e048d 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -429,9 +429,9 @@ ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
429 cb->args[0], *id); 429 cb->args[0], *id);
430 430
431 read_lock_bh(&nf_conntrack_lock); 431 read_lock_bh(&nf_conntrack_lock);
432 last = (struct nf_conn *)cb->args[1];
432 for (; cb->args[0] < nf_conntrack_htable_size; cb->args[0]++) { 433 for (; cb->args[0] < nf_conntrack_htable_size; cb->args[0]++) {
433restart: 434restart:
434 last = (struct nf_conn *)cb->args[1];
435 list_for_each_prev(i, &nf_conntrack_hash[cb->args[0]]) { 435 list_for_each_prev(i, &nf_conntrack_hash[cb->args[0]]) {
436 h = (struct nf_conntrack_tuple_hash *) i; 436 h = (struct nf_conntrack_tuple_hash *) i;
437 if (DIRECTION(h) != IP_CT_DIR_ORIGINAL) 437 if (DIRECTION(h) != IP_CT_DIR_ORIGINAL)
@@ -442,13 +442,10 @@ restart:
442 * then dump everything. */ 442 * then dump everything. */
443 if (l3proto && L3PROTO(ct) != l3proto) 443 if (l3proto && L3PROTO(ct) != l3proto)
444 continue; 444 continue;
445 if (last != NULL) { 445 if (cb->args[1]) {
446 if (ct == last) { 446 if (ct != last)
447 nf_ct_put(last);
448 cb->args[1] = 0;
449 last = NULL;
450 } else
451 continue; 447 continue;
448 cb->args[1] = 0;
452 } 449 }
453 if (ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).pid, 450 if (ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).pid,
454 cb->nlh->nlmsg_seq, 451 cb->nlh->nlmsg_seq,
@@ -459,17 +456,17 @@ restart:
459 goto out; 456 goto out;
460 } 457 }
461 } 458 }
462 if (last != NULL) { 459 if (cb->args[1]) {
463 nf_ct_put(last);
464 cb->args[1] = 0; 460 cb->args[1] = 0;
465 goto restart; 461 goto restart;
466 } 462 }
467 } 463 }
468out: 464out:
469 read_unlock_bh(&nf_conntrack_lock); 465 read_unlock_bh(&nf_conntrack_lock);
466 if (last)
467 nf_ct_put(last);
470 468
471 DEBUGP("leaving, last bucket=%lu id=%u\n", cb->args[0], *id); 469 DEBUGP("leaving, last bucket=%lu id=%u\n", cb->args[0], *id);
472
473 return skb->len; 470 return skb->len;
474} 471}
475 472
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index 61cdda4e5d3..b59d3b2bde2 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -366,6 +366,9 @@ __nfulnl_send(struct nfulnl_instance *inst)
366 if (timer_pending(&inst->timer)) 366 if (timer_pending(&inst->timer))
367 del_timer(&inst->timer); 367 del_timer(&inst->timer);
368 368
369 if (!inst->skb)
370 return 0;
371
369 if (inst->qlen > 1) 372 if (inst->qlen > 1)
370 inst->lastnlh->nlmsg_type = NLMSG_DONE; 373 inst->lastnlh->nlmsg_type = NLMSG_DONE;
371 374
diff --git a/net/netfilter/xt_physdev.c b/net/netfilter/xt_physdev.c
index a9f4f6f3c62..63a96546746 100644
--- a/net/netfilter/xt_physdev.c
+++ b/net/netfilter/xt_physdev.c
@@ -10,6 +10,7 @@
10 10
11#include <linux/module.h> 11#include <linux/module.h>
12#include <linux/skbuff.h> 12#include <linux/skbuff.h>
13#include <linux/netfilter_bridge.h>
13#include <linux/netfilter/xt_physdev.h> 14#include <linux/netfilter/xt_physdev.h>
14#include <linux/netfilter/x_tables.h> 15#include <linux/netfilter/x_tables.h>
15#include <linux/netfilter_bridge.h> 16#include <linux/netfilter_bridge.h>
diff --git a/net/netfilter/xt_string.c b/net/netfilter/xt_string.c
index d8e3891b5f8..275330fcdaa 100644
--- a/net/netfilter/xt_string.c
+++ b/net/netfilter/xt_string.c
@@ -37,7 +37,7 @@ static int match(const struct sk_buff *skb,
37 37
38 return (skb_find_text((struct sk_buff *)skb, conf->from_offset, 38 return (skb_find_text((struct sk_buff *)skb, conf->from_offset,
39 conf->to_offset, conf->config, &state) 39 conf->to_offset, conf->config, &state)
40 != UINT_MAX) && !conf->invert; 40 != UINT_MAX) ^ conf->invert;
41} 41}
42 42
43#define STRING_TEXT_PRIV(m) ((struct xt_string_info *) m) 43#define STRING_TEXT_PRIV(m) ((struct xt_string_info *) m)
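The xt_string change above replaces "found && !invert" with "found ^ invert": the old expression can never report a match when the pattern is absent, so an inverted rule never fired. A standalone truth-table check:

/* Truth-table check for the one-line invert fix. */
#include <stdio.h>

int main(void)
{
	int found, invert;

	printf("found invert   old(&&!)   new(^)\n");
	for (found = 0; found <= 1; found++)
		for (invert = 0; invert <= 1; invert++)
			printf("  %d     %d         %d         %d\n",
			       found, invert,
			       found && !invert,
			       found ^ invert);
	return 0;
}
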
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index b85c1f9f128..8b85036ba8e 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1273,8 +1273,7 @@ netlink_kernel_create(int unit, unsigned int groups,
1273 struct netlink_sock *nlk; 1273 struct netlink_sock *nlk;
1274 unsigned long *listeners = NULL; 1274 unsigned long *listeners = NULL;
1275 1275
1276 if (!nl_table) 1276 BUG_ON(!nl_table);
1277 return NULL;
1278 1277
1279 if (unit<0 || unit>=MAX_LINKS) 1278 if (unit<0 || unit>=MAX_LINKS)
1280 return NULL; 1279 return NULL;
@@ -1745,11 +1744,8 @@ static int __init netlink_proto_init(void)
1745 netlink_skb_parms_too_large(); 1744 netlink_skb_parms_too_large();
1746 1745
1747 nl_table = kcalloc(MAX_LINKS, sizeof(*nl_table), GFP_KERNEL); 1746 nl_table = kcalloc(MAX_LINKS, sizeof(*nl_table), GFP_KERNEL);
1748 if (!nl_table) { 1747 if (!nl_table)
1749enomem: 1748 goto panic;
1750 printk(KERN_CRIT "netlink_init: Cannot allocate nl_table\n");
1751 return -ENOMEM;
1752 }
1753 1749
1754 if (num_physpages >= (128 * 1024)) 1750 if (num_physpages >= (128 * 1024))
1755 max = num_physpages >> (21 - PAGE_SHIFT); 1751 max = num_physpages >> (21 - PAGE_SHIFT);
@@ -1769,7 +1765,7 @@ enomem:
1769 nl_pid_hash_free(nl_table[i].hash.table, 1765 nl_pid_hash_free(nl_table[i].hash.table,
1770 1 * sizeof(*hash->table)); 1766 1 * sizeof(*hash->table));
1771 kfree(nl_table); 1767 kfree(nl_table);
1772 goto enomem; 1768 goto panic;
1773 } 1769 }
1774 memset(hash->table, 0, 1 * sizeof(*hash->table)); 1770 memset(hash->table, 0, 1 * sizeof(*hash->table));
1775 hash->max_shift = order; 1771 hash->max_shift = order;
@@ -1786,6 +1782,8 @@ enomem:
1786 rtnetlink_init(); 1782 rtnetlink_init();
1787out: 1783out:
1788 return err; 1784 return err;
1785panic:
1786 panic("netlink_init: Cannot allocate nl_table\n");
1789} 1787}
1790 1788
1791core_initcall(netlink_proto_init); 1789core_initcall(netlink_proto_init);
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index eea36696674..0a6cfa0005b 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -796,7 +796,7 @@ static int __init init_u32(void)
796{ 796{
797 printk("u32 classifier\n"); 797 printk("u32 classifier\n");
798#ifdef CONFIG_CLS_U32_PERF 798#ifdef CONFIG_CLS_U32_PERF
799 printk(" Perfomance counters on\n"); 799 printk(" Performance counters on\n");
800#endif 800#endif
801#ifdef CONFIG_NET_CLS_POLICE 801#ifdef CONFIG_NET_CLS_POLICE
802 printk(" OLD policer on \n"); 802 printk(" OLD policer on \n");
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 4f11f585820..17b509282cf 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -806,38 +806,26 @@ no_mem:
806 806
807/* Helper to create ABORT with a SCTP_ERROR_USER_ABORT error. */ 807/* Helper to create ABORT with a SCTP_ERROR_USER_ABORT error. */
808struct sctp_chunk *sctp_make_abort_user(const struct sctp_association *asoc, 808struct sctp_chunk *sctp_make_abort_user(const struct sctp_association *asoc,
809 const struct sctp_chunk *chunk, 809 const struct msghdr *msg,
810 const struct msghdr *msg) 810 size_t paylen)
811{ 811{
812 struct sctp_chunk *retval; 812 struct sctp_chunk *retval;
813 void *payload = NULL, *payoff; 813 void *payload = NULL;
814 size_t paylen = 0; 814 int err;
815 struct iovec *iov = NULL;
816 int iovlen = 0;
817
818 if (msg) {
819 iov = msg->msg_iov;
820 iovlen = msg->msg_iovlen;
821 paylen = get_user_iov_size(iov, iovlen);
822 }
823 815
824 retval = sctp_make_abort(asoc, chunk, sizeof(sctp_errhdr_t) + paylen); 816 retval = sctp_make_abort(asoc, NULL, sizeof(sctp_errhdr_t) + paylen);
825 if (!retval) 817 if (!retval)
826 goto err_chunk; 818 goto err_chunk;
827 819
828 if (paylen) { 820 if (paylen) {
829 /* Put the msg_iov together into payload. */ 821 /* Put the msg_iov together into payload. */
830 payload = kmalloc(paylen, GFP_ATOMIC); 822 payload = kmalloc(paylen, GFP_KERNEL);
831 if (!payload) 823 if (!payload)
832 goto err_payload; 824 goto err_payload;
833 payoff = payload;
834 825
835 for (; iovlen > 0; --iovlen) { 826 err = memcpy_fromiovec(payload, msg->msg_iov, paylen);
836 if (copy_from_user(payoff, iov->iov_base,iov->iov_len)) 827 if (err < 0)
837 goto err_copy; 828 goto err_copy;
838 payoff += iov->iov_len;
839 iov++;
840 }
841 } 829 }
842 830
843 sctp_init_cause(retval, SCTP_ERROR_USER_ABORT, payload, paylen); 831 sctp_init_cause(retval, SCTP_ERROR_USER_ABORT, payload, paylen);
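The sctp_make_abort_user() rework above replaces a hand-rolled per-segment copy_from_user() loop with a single memcpy_fromiovec() call that flattens the user's iovec into one contiguous payload. A userspace sketch of that flattening step (plain memcpy in place of the kernel helper; names are stand-ins):

/* Userspace sketch of flattening an iovec into one buffer. Illustrative only. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/uio.h>

/* Flatten 'iovcnt' segments into 'dst' (caller guarantees dst is big enough). */
static size_t flatten_iovec(void *dst, const struct iovec *iov, int iovcnt)
{
	char *p = dst;
	int i;

	for (i = 0; i < iovcnt; i++) {
		memcpy(p, iov[i].iov_base, iov[i].iov_len);
		p += iov[i].iov_len;
	}
	return (size_t)(p - (char *)dst);
}

int main(void)
{
	char a[] = "abort ", b[] = "reason";
	struct iovec iov[2] = {
		{ .iov_base = a, .iov_len = strlen(a) },
		{ .iov_base = b, .iov_len = strlen(b) },
	};
	size_t total = iov[0].iov_len + iov[1].iov_len;
	char *payload = malloc(total + 1);

	if (!payload)
		return 1;
	payload[flatten_iovec(payload, iov, 2)] = '\0';
	printf("payload: \"%s\" (%zu bytes)\n", payload, total);
	free(payload);
	return 0;
}
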
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index ead3f1b0ea3..5b5ae795832 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -4031,18 +4031,12 @@ sctp_disposition_t sctp_sf_do_9_1_prm_abort(
4031 * from its upper layer, but retransmits data to the far end 4031 * from its upper layer, but retransmits data to the far end
4032 * if necessary to fill gaps. 4032 * if necessary to fill gaps.
4033 */ 4033 */
4034 struct msghdr *msg = arg; 4034 struct sctp_chunk *abort = arg;
4035 struct sctp_chunk *abort;
4036 sctp_disposition_t retval; 4035 sctp_disposition_t retval;
4037 4036
4038 retval = SCTP_DISPOSITION_CONSUME; 4037 retval = SCTP_DISPOSITION_CONSUME;
4039 4038
4040 /* Generate ABORT chunk to send the peer. */ 4039 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort));
4041 abort = sctp_make_abort_user(asoc, NULL, msg);
4042 if (!abort)
4043 retval = SCTP_DISPOSITION_NOMEM;
4044 else
4045 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort));
4046 4040
4047 /* Even if we can't send the ABORT due to low memory delete the 4041 /* Even if we can't send the ABORT due to low memory delete the
4048 * TCB. This is a departure from our typical NOMEM handling. 4042 * TCB. This is a departure from our typical NOMEM handling.
@@ -4166,8 +4160,7 @@ sctp_disposition_t sctp_sf_cookie_wait_prm_abort(
4166 void *arg, 4160 void *arg,
4167 sctp_cmd_seq_t *commands) 4161 sctp_cmd_seq_t *commands)
4168{ 4162{
4169 struct msghdr *msg = arg; 4163 struct sctp_chunk *abort = arg;
4170 struct sctp_chunk *abort;
4171 sctp_disposition_t retval; 4164 sctp_disposition_t retval;
4172 4165
4173 /* Stop T1-init timer */ 4166 /* Stop T1-init timer */
@@ -4175,12 +4168,7 @@ sctp_disposition_t sctp_sf_cookie_wait_prm_abort(
4175 SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT)); 4168 SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT));
4176 retval = SCTP_DISPOSITION_CONSUME; 4169 retval = SCTP_DISPOSITION_CONSUME;
4177 4170
4178 /* Generate ABORT chunk to send the peer */ 4171 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort));
4179 abort = sctp_make_abort_user(asoc, NULL, msg);
4180 if (!abort)
4181 retval = SCTP_DISPOSITION_NOMEM;
4182 else
4183 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort));
4184 4172
4185 sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, 4173 sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
4186 SCTP_STATE(SCTP_STATE_CLOSED)); 4174 SCTP_STATE(SCTP_STATE_CLOSED));
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 54722e622e6..dab15949958 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -1289,9 +1289,13 @@ SCTP_STATIC void sctp_close(struct sock *sk, long timeout)
1289 } 1289 }
1290 } 1290 }
1291 1291
1292 if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) 1292 if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
1293 sctp_primitive_ABORT(asoc, NULL); 1293 struct sctp_chunk *chunk;
1294 else 1294
1295 chunk = sctp_make_abort_user(asoc, NULL, 0);
1296 if (chunk)
1297 sctp_primitive_ABORT(asoc, chunk);
1298 } else
1295 sctp_primitive_SHUTDOWN(asoc, NULL); 1299 sctp_primitive_SHUTDOWN(asoc, NULL);
1296 } 1300 }
1297 1301
@@ -1520,8 +1524,16 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
1520 goto out_unlock; 1524 goto out_unlock;
1521 } 1525 }
1522 if (sinfo_flags & SCTP_ABORT) { 1526 if (sinfo_flags & SCTP_ABORT) {
1527 struct sctp_chunk *chunk;
1528
1529 chunk = sctp_make_abort_user(asoc, msg, msg_len);
1530 if (!chunk) {
1531 err = -ENOMEM;
1532 goto out_unlock;
1533 }
1534
1523 SCTP_DEBUG_PRINTK("Aborting association: %p\n", asoc); 1535 SCTP_DEBUG_PRINTK("Aborting association: %p\n", asoc);
1524 sctp_primitive_ABORT(asoc, msg); 1536 sctp_primitive_ABORT(asoc, chunk);
1525 err = 0; 1537 err = 0;
1526 goto out_unlock; 1538 goto out_unlock;
1527 } 1539 }
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index 4a9aa9393b9..ef1cf5b476c 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -718,8 +718,7 @@ gss_destroy(struct rpc_auth *auth)
718 auth, auth->au_flavor); 718 auth, auth->au_flavor);
719 719
720 gss_auth = container_of(auth, struct gss_auth, rpc_auth); 720 gss_auth = container_of(auth, struct gss_auth, rpc_auth);
721 rpc_unlink(gss_auth->path); 721 rpc_unlink(gss_auth->dentry);
722 dput(gss_auth->dentry);
723 gss_auth->dentry = NULL; 722 gss_auth->dentry = NULL;
724 gss_mech_put(gss_auth->mech); 723 gss_mech_put(gss_auth->mech);
725 724
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index d6409e75721..3e19d321067 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -183,8 +183,7 @@ rpc_new_client(struct rpc_xprt *xprt, char *servname,
183 183
184out_no_auth: 184out_no_auth:
185 if (!IS_ERR(clnt->cl_dentry)) { 185 if (!IS_ERR(clnt->cl_dentry)) {
186 rpc_rmdir(clnt->cl_pathname); 186 rpc_rmdir(clnt->cl_dentry);
187 dput(clnt->cl_dentry);
188 rpc_put_mount(); 187 rpc_put_mount();
189 } 188 }
190out_no_path: 189out_no_path:
@@ -251,10 +250,8 @@ rpc_clone_client(struct rpc_clnt *clnt)
251 new->cl_autobind = 0; 250 new->cl_autobind = 0;
252 new->cl_oneshot = 0; 251 new->cl_oneshot = 0;
253 new->cl_dead = 0; 252 new->cl_dead = 0;
254 if (!IS_ERR(new->cl_dentry)) { 253 if (!IS_ERR(new->cl_dentry))
255 dget(new->cl_dentry); 254 dget(new->cl_dentry);
256 rpc_get_mount();
257 }
258 rpc_init_rtt(&new->cl_rtt_default, clnt->cl_xprt->timeout.to_initval); 255 rpc_init_rtt(&new->cl_rtt_default, clnt->cl_xprt->timeout.to_initval);
259 if (new->cl_auth) 256 if (new->cl_auth)
260 atomic_inc(&new->cl_auth->au_count); 257 atomic_inc(&new->cl_auth->au_count);
@@ -317,11 +314,15 @@ rpc_destroy_client(struct rpc_clnt *clnt)
317 clnt->cl_auth = NULL; 314 clnt->cl_auth = NULL;
318 } 315 }
319 if (clnt->cl_parent != clnt) { 316 if (clnt->cl_parent != clnt) {
317 if (!IS_ERR(clnt->cl_dentry))
318 dput(clnt->cl_dentry);
320 rpc_destroy_client(clnt->cl_parent); 319 rpc_destroy_client(clnt->cl_parent);
321 goto out_free; 320 goto out_free;
322 } 321 }
323 if (clnt->cl_pathname[0]) 322 if (!IS_ERR(clnt->cl_dentry)) {
324 rpc_rmdir(clnt->cl_pathname); 323 rpc_rmdir(clnt->cl_dentry);
324 rpc_put_mount();
325 }
325 if (clnt->cl_xprt) { 326 if (clnt->cl_xprt) {
326 xprt_destroy(clnt->cl_xprt); 327 xprt_destroy(clnt->cl_xprt);
327 clnt->cl_xprt = NULL; 328 clnt->cl_xprt = NULL;
@@ -331,10 +332,6 @@ rpc_destroy_client(struct rpc_clnt *clnt)
331out_free: 332out_free:
332 rpc_free_iostats(clnt->cl_metrics); 333 rpc_free_iostats(clnt->cl_metrics);
333 clnt->cl_metrics = NULL; 334 clnt->cl_metrics = NULL;
334 if (!IS_ERR(clnt->cl_dentry)) {
335 dput(clnt->cl_dentry);
336 rpc_put_mount();
337 }
338 kfree(clnt); 335 kfree(clnt);
339 return 0; 336 return 0;
340} 337}
@@ -1184,6 +1181,17 @@ call_verify(struct rpc_task *task)
1184 u32 *p = iov->iov_base, n; 1181 u32 *p = iov->iov_base, n;
1185 int error = -EACCES; 1182 int error = -EACCES;
1186 1183
1184 if ((task->tk_rqstp->rq_rcv_buf.len & 3) != 0) {
1185 /* RFC-1014 says that the representation of XDR data must be a
1186 * multiple of four bytes
1187 * - if it isn't pointer subtraction in the NFS client may give
1188 * undefined results
1189 */
1190 printk(KERN_WARNING
1191 "call_verify: XDR representation not a multiple of"
1192 " 4 bytes: 0x%x\n", task->tk_rqstp->rq_rcv_buf.len);
1193 goto out_eio;
1194 }
1187 if ((len -= 3) < 0) 1195 if ((len -= 3) < 0)
1188 goto out_overflow; 1196 goto out_overflow;
1189 p += 1; /* skip XID */ 1197 p += 1; /* skip XID */
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index a3bd2db2e02..0b1a1ac8a4b 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -539,6 +539,7 @@ repeat:
539 rpc_close_pipes(dentry->d_inode); 539 rpc_close_pipes(dentry->d_inode);
540 simple_unlink(dir, dentry); 540 simple_unlink(dir, dentry);
541 } 541 }
542 inode_dir_notify(dir, DN_DELETE);
542 dput(dentry); 543 dput(dentry);
543 } while (n); 544 } while (n);
544 goto repeat; 545 goto repeat;
@@ -610,8 +611,8 @@ __rpc_rmdir(struct inode *dir, struct dentry *dentry)
610 int error; 611 int error;
611 612
612 shrink_dcache_parent(dentry); 613 shrink_dcache_parent(dentry);
613 if (dentry->d_inode) 614 if (d_unhashed(dentry))
614 rpc_close_pipes(dentry->d_inode); 615 return 0;
615 if ((error = simple_rmdir(dir, dentry)) != 0) 616 if ((error = simple_rmdir(dir, dentry)) != 0)
616 return error; 617 return error;
617 if (!error) { 618 if (!error) {
@@ -684,28 +685,20 @@ err_dput:
684} 685}
685 686
686int 687int
687rpc_rmdir(char *path) 688rpc_rmdir(struct dentry *dentry)
688{ 689{
689 struct nameidata nd; 690 struct dentry *parent;
690 struct dentry *dentry;
691 struct inode *dir; 691 struct inode *dir;
692 int error; 692 int error;
693 693
694 if ((error = rpc_lookup_parent(path, &nd)) != 0) 694 parent = dget_parent(dentry);
695 return error; 695 dir = parent->d_inode;
696 dir = nd.dentry->d_inode;
697 mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT); 696 mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT);
698 dentry = lookup_one_len(nd.last.name, nd.dentry, nd.last.len);
699 if (IS_ERR(dentry)) {
700 error = PTR_ERR(dentry);
701 goto out_release;
702 }
703 rpc_depopulate(dentry); 697 rpc_depopulate(dentry);
704 error = __rpc_rmdir(dir, dentry); 698 error = __rpc_rmdir(dir, dentry);
705 dput(dentry); 699 dput(dentry);
706out_release:
707 mutex_unlock(&dir->i_mutex); 700 mutex_unlock(&dir->i_mutex);
708 rpc_release_path(&nd); 701 dput(parent);
709 return error; 702 return error;
710} 703}
711 704
@@ -746,32 +739,26 @@ err_dput:
746} 739}
747 740
748int 741int
749rpc_unlink(char *path) 742rpc_unlink(struct dentry *dentry)
750{ 743{
751 struct nameidata nd; 744 struct dentry *parent;
752 struct dentry *dentry;
753 struct inode *dir; 745 struct inode *dir;
754 int error; 746 int error = 0;
755 747
756 if ((error = rpc_lookup_parent(path, &nd)) != 0) 748 parent = dget_parent(dentry);
757 return error; 749 dir = parent->d_inode;
758 dir = nd.dentry->d_inode;
759 mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT); 750 mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT);
760 dentry = lookup_one_len(nd.last.name, nd.dentry, nd.last.len); 751 if (!d_unhashed(dentry)) {
761 if (IS_ERR(dentry)) { 752 d_drop(dentry);
762 error = PTR_ERR(dentry); 753 if (dentry->d_inode) {
763 goto out_release; 754 rpc_close_pipes(dentry->d_inode);
764 } 755 error = simple_unlink(dir, dentry);
765 d_drop(dentry); 756 }
766 if (dentry->d_inode) { 757 inode_dir_notify(dir, DN_DELETE);
767 rpc_close_pipes(dentry->d_inode);
768 error = simple_unlink(dir, dentry);
769 } 758 }
770 dput(dentry); 759 dput(dentry);
771 inode_dir_notify(dir, DN_DELETE);
772out_release:
773 mutex_unlock(&dir->i_mutex); 760 mutex_unlock(&dir->i_mutex);
774 rpc_release_path(&nd); 761 dput(parent);
775 return error; 762 return error;
776} 763}
777 764
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index f35bc676128..3da67ca2c3c 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -1134,12 +1134,33 @@ int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
1134} 1134}
1135EXPORT_SYMBOL(__xfrm_route_forward); 1135EXPORT_SYMBOL(__xfrm_route_forward);
1136 1136
1137/* Optimize later using cookies and generation ids. */
1138
1137static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie) 1139static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie)
1138{ 1140{
1139 /* If it is marked obsolete, which is how we even get here, 1141 /* Code (such as __xfrm4_bundle_create()) sets dst->obsolete
1140 * then we have purged it from the policy bundle list and we 1142 * to "-1" to force all XFRM destinations to get validated by
1141 * did that for a good reason. 1143 * dst_ops->check on every use. We do this because when a
1144 * normal route referenced by an XFRM dst is obsoleted we do
1145 * not go looking around for all parent referencing XFRM dsts
1146 * so that we can invalidate them. It is just too much work.
1147 * Instead we make the checks here on every use. For example:
1148 *
1149 * XFRM dst A --> IPv4 dst X
1150 *
1151 * X is the "xdst->route" of A (X is also the "dst->path" of A
1152 * in this example). If X is marked obsolete, "A" will not
1153 * notice. That's what we are validating here via the
1154 * stale_bundle() check.
1155 *
1156 * When a policy's bundle is pruned, we dst_free() the XFRM
1157 * dst which causes it's ->obsolete field to be set to a
1158 * positive non-zero integer. If an XFRM dst has been pruned
1159 * like this, we want to force a new route lookup.
1142 */ 1160 */
1161 if (dst->obsolete < 0 && !stale_bundle(dst))
1162 return dst;
1163
1143 return NULL; 1164 return NULL;
1144} 1165}
1145 1166
diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
index 44312926b84..e2de650d3db 100644
--- a/scripts/mod/file2alias.c
+++ b/scripts/mod/file2alias.c
@@ -391,7 +391,7 @@ static void do_input(char *alias,
391 unsigned int i; 391 unsigned int i;
392 392
393 for (i = min; i < max; i++) 393 for (i = min; i < max; i++)
394 if (arr[i / BITS_PER_LONG] & (1 << (i%BITS_PER_LONG))) 394 if (arr[i / BITS_PER_LONG] & (1L << (i%BITS_PER_LONG)))
395 sprintf(alias + strlen(alias), "%X,*", i); 395 sprintf(alias + strlen(alias), "%X,*", i);
396} 396}
397 397
diff --git a/sound/oss/Kconfig b/sound/oss/Kconfig
index f4980ca5c05..1b7c3dfc2b4 100644
--- a/sound/oss/Kconfig
+++ b/sound/oss/Kconfig
@@ -31,7 +31,7 @@ config SOUND_EMU10K1
31 For more information on this driver and the degree of support for 31 For more information on this driver and the degree of support for
32 the different card models please check: 32 the different card models please check:
33 33
34 <http://sourceforge.net/projects/emu10k1/> 34 <http://sourceforge.net/projects/emu10k1/>
35 35
36 It is now possible to load dsp microcode patches into the EMU10K1 36 It is now possible to load dsp microcode patches into the EMU10K1
37 chip. These patches are used to implement real time sound 37 chip. These patches are used to implement real time sound
@@ -140,7 +140,7 @@ config SOUND_TRIDENT
140 system support" and "Sysctl support", and after the /proc file 140 system support" and "Sysctl support", and after the /proc file
141 system has been mounted, executing the command 141 system has been mounted, executing the command
142 142
143 command what is enabled 143 command what is enabled
144 144
145 echo 0>/proc/ALi5451 pcm out is also set to S/PDIF out. (Default). 145 echo 0>/proc/ALi5451 pcm out is also set to S/PDIF out. (Default).
146 146
@@ -838,7 +838,7 @@ config SOUND_WAVEARTIST
838 838
839config SOUND_TVMIXER 839config SOUND_TVMIXER
840 tristate "TV card (bt848) mixer support" 840 tristate "TV card (bt848) mixer support"
841 depends on SOUND_PRIME && I2C 841 depends on SOUND_PRIME && I2C && VIDEO_V4L1
842 help 842 help
843 Support for audio mixer facilities on the BT848 TV frame-grabber 843 Support for audio mixer facilities on the BT848 TV frame-grabber
844 card. 844 card.
diff --git a/sound/pci/Kconfig b/sound/pci/Kconfig
index d7ad32f514d..e49c0fe21b0 100644
--- a/sound/pci/Kconfig
+++ b/sound/pci/Kconfig
@@ -16,16 +16,16 @@ config SND_AD1889
16 will be called snd-ad1889. 16 will be called snd-ad1889.
17 17
18config SND_ALS300 18config SND_ALS300
19 tristate "Avance Logic ALS300/ALS300+" 19 tristate "Avance Logic ALS300/ALS300+"
20 depends on SND 20 depends on SND
21 select SND_PCM 21 select SND_PCM
22 select SND_AC97_CODEC 22 select SND_AC97_CODEC
23 select SND_OPL3_LIB 23 select SND_OPL3_LIB
24 help 24 help
25 Say 'Y' or 'M' to include support for Avance Logic ALS300/ALS300+ 25 Say 'Y' or 'M' to include support for Avance Logic ALS300/ALS300+
26 26
27 To compile this driver as a module, choose M here: the module 27 To compile this driver as a module, choose M here: the module
28 will be called snd-als300 28 will be called snd-als300
29 29
30config SND_ALS4000 30config SND_ALS4000
31 tristate "Avance Logic ALS4000" 31 tristate "Avance Logic ALS4000"
@@ -78,49 +78,49 @@ config SND_ATIIXP_MODEM
 will be called snd-atiixp-modem.
 
 config SND_AU8810
 tristate "Aureal Advantage"
 depends on SND
 select SND_MPU401_UART
 select SND_AC97_CODEC
 help
 Say Y here to include support for Aureal Advantage soundcards.
 
 Supported features: Hardware Mixer, SRC, EQ and SPDIF output.
 3D support code is in place, but not yet useable. For more info,
 email the ALSA developer list, or <mjander@users.sourceforge.net>.
 
 To compile this driver as a module, choose M here: the module
 will be called snd-au8810.
 
 config SND_AU8820
 tristate "Aureal Vortex"
 depends on SND
 select SND_MPU401_UART
 select SND_AC97_CODEC
 help
 Say Y here to include support for Aureal Vortex soundcards.
 
 Supported features: Hardware Mixer and SRC. For more info, email
 the ALSA developer list, or <mjander@users.sourceforge.net>.
 
 To compile this driver as a module, choose M here: the module
 will be called snd-au8820.
 
 config SND_AU8830
 tristate "Aureal Vortex 2"
 depends on SND
 select SND_MPU401_UART
 select SND_AC97_CODEC
 help
 Say Y here to include support for Aureal Vortex 2 soundcards.
 
 Supported features: Hardware Mixer, SRC, EQ and SPDIF output.
 3D support code is in place, but not yet useable. For more info,
 email the ALSA developer list, or <mjander@users.sourceforge.net>.
 
 To compile this driver as a module, choose M here: the module
 will be called snd-au8830.
 
 config SND_AZT3328
 tristate "Aztech AZF3328 / PCI168 (EXPERIMENTAL)"
 depends on SND && EXPERIMENTAL
@@ -135,10 +135,10 @@ config SND_AZT3328
 will be called snd-azt3328.
 
 config SND_BT87X
 tristate "Bt87x Audio Capture"
 depends on SND
 select SND_PCM
 help
 If you want to record audio from TV cards based on
 Brooktree Bt878/Bt879 chips, say Y here and read
 <file:Documentation/sound/alsa/Bt87x.txt>.
@@ -209,7 +209,7 @@ config SND_CS46XX
 config SND_CS46XX_NEW_DSP
 bool "Cirrus Logic (Sound Fusion) New DSP support"
 depends on SND_CS46XX
 default y
 help
 Say Y here to use a new DSP image for SPDIF and dual codecs.
 
@@ -225,7 +225,7 @@ config SND_CS5535AUDIO
 referred to as NS CS5535 IO or AMD CS5535 IO companion in
 various literature. This driver also supports the CS5536 audio
 device. However, for both chips, on certain boards, you may
 need to use ac97_quirk=hp_only if your board has physically
 mapped headphone out to master output. If that works for you,
 send lspci -vvv output to the mailing list so that your board
 can be identified in the quirks list.
@@ -468,11 +468,13 @@ config SND_FM801_TEA575X_BOOL
 FM801 chip with a TEA5757 tuner connected to GPIO1-3 pins (Media
 Forte SF256-PCS-02) into the snd-fm801 driver.
 
+This will enable support for the old V4L1 API.
+
 config SND_FM801_TEA575X
 tristate
 depends on SND_FM801_TEA575X_BOOL
 default SND_FM801
-select VIDEO_DEV
+select VIDEO_V4L1
 
 config SND_HDA_INTEL
 tristate "Intel HD Audio"
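
The SND_FM801_TEA575X hunk above is the usual Kconfig idiom for a hidden helper symbol: no prompt string, a default that tracks the driver symbol, a dependency on the user-visible bool, and a select for the framework it needs (now VIDEO_V4L1 instead of VIDEO_DEV). A minimal sketch of the pattern, with made-up SND_FOO* symbol names standing in for the real ones:

# user-visible switch, shown only when the driver itself is enabled
config SND_FOO_TUNER_BOOL
	bool "Foo radio tuner support"
	depends on SND_FOO

# hidden helper: no prompt, follows the driver symbol (y/m),
# and pulls in the V4L1 core it relies on
config SND_FOO_TUNER
	tristate
	depends on SND_FOO_TUNER_BOOL
	default SND_FOO
	select VIDEO_V4L1

With this arrangement the helper is built exactly when both the driver and the bool are enabled, and the V4L1 dependency never has to be repeated in the user-visible option.
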
diff --git a/usr/Makefile b/usr/Makefile
index e93824269da..5b31c0b61c7 100644
--- a/usr/Makefile
+++ b/usr/Makefile
@@ -35,6 +35,9 @@ quiet_cmd_initfs = GEN $@
 cmd_initfs = $(initramfs) -o $@ $(ramfs-args) $(ramfs-input)
 
 targets := initramfs_data.cpio.gz
+# do not try to update files included in initramfs
+$(deps_initramfs): ;
+
 $(deps_initramfs): klibcdirs
 # We rebuild initramfs_data.cpio.gz if:
 # 1) Any included file is newer then initramfs_data.cpio.gz